x86/amd-iommu: Rearrange dma_ops related functions
[deliverable/linux.git] / arch / x86 / kernel / amd_iommu.c
b6c02715 1/*
bf3118c1 2 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
b6c02715
JR
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/pci.h>
21#include <linux/gfp.h>
22#include <linux/bitops.h>
7f26508b 23#include <linux/debugfs.h>
b6c02715 24#include <linux/scatterlist.h>
51491367 25#include <linux/dma-mapping.h>
b6c02715 26#include <linux/iommu-helper.h>
c156e347 27#include <linux/iommu.h>
b6c02715 28#include <asm/proto.h>
46a7fa27 29#include <asm/iommu.h>
1d9b16d1 30#include <asm/gart.h>
6a9401a7 31#include <asm/amd_iommu_proto.h>
b6c02715 32#include <asm/amd_iommu_types.h>
c6da992e 33#include <asm/amd_iommu.h>
b6c02715
JR
34
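/* The command opcode is encoded in bits 31:28 of the second command dword (data[1]) */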
35#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
36
136f78a1
JR
37#define EXIT_LOOP_COUNT 10000000
38
b6c02715
JR
39static DEFINE_RWLOCK(amd_iommu_devtable_lock);
40
bd60b735
JR
41/* A list of preallocated protection domains */
42static LIST_HEAD(iommu_pd_list);
43static DEFINE_SPINLOCK(iommu_pd_list_lock);
44
0feae533
JR
45/*
46 * Domain for untranslated devices - only allocated
47 * if iommu=pt passed on kernel cmd line.
48 */
49static struct protection_domain *pt_domain;
50
26961efe 51static struct iommu_ops amd_iommu_ops;
26961efe 52
431b2a20
JR
53/*
 54 * general struct to manage commands sent to an IOMMU
55 */
d6449536 56struct iommu_cmd {
b6c02715
JR
57 u32 data[4];
58};
59
a345b23b 60static void reset_iommu_command_buffer(struct amd_iommu *iommu);
04bfdd84 61static void update_domain(struct protection_domain *domain);
c1eee67b 62
15898bbc
JR
63/****************************************************************************
64 *
65 * Helper functions
66 *
67 ****************************************************************************/
68
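/* Derive the 16-bit requestor id (bus number / devfn) used to index the IOMMU tables */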
69static inline u16 get_device_id(struct device *dev)
70{
71 struct pci_dev *pdev = to_pci_dev(dev);
72
73 return calc_devid(pdev->bus->number, pdev->devfn);
74}
75
71c70984
JR
76/*
77 * In this function the list of preallocated protection domains is traversed to
78 * find the domain for a specific device
79 */
80static struct dma_ops_domain *find_protection_domain(u16 devid)
81{
82 struct dma_ops_domain *entry, *ret = NULL;
83 unsigned long flags;
84 u16 alias = amd_iommu_alias_table[devid];
85
86 if (list_empty(&iommu_pd_list))
87 return NULL;
88
89 spin_lock_irqsave(&iommu_pd_list_lock, flags);
90
91 list_for_each_entry(entry, &iommu_pd_list, list) {
92 if (entry->target_dev == devid ||
93 entry->target_dev == alias) {
94 ret = entry;
95 break;
96 }
97 }
98
99 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
100
101 return ret;
102}
103
98fc5a69
JR
104/*
105 * This function checks if the driver got a valid device from the caller to
106 * avoid dereferencing invalid pointers.
107 */
108static bool check_device(struct device *dev)
109{
110 u16 devid;
111
112 if (!dev || !dev->dma_mask)
113 return false;
114
115 /* No device or no PCI device */
116 if (!dev || dev->bus != &pci_bus_type)
117 return false;
118
119 devid = get_device_id(dev);
120
121 /* Out of our scope? */
122 if (devid > amd_iommu_last_bdf)
123 return false;
124
125 if (amd_iommu_rlookup_table[devid] == NULL)
126 return false;
127
128 return true;
129}
130
7f26508b
JR
131#ifdef CONFIG_AMD_IOMMU_STATS
132
133/*
134 * Initialization code for statistics collection
135 */
136
da49f6df 137DECLARE_STATS_COUNTER(compl_wait);
0f2a86f2 138DECLARE_STATS_COUNTER(cnt_map_single);
146a6917 139DECLARE_STATS_COUNTER(cnt_unmap_single);
d03f067a 140DECLARE_STATS_COUNTER(cnt_map_sg);
55877a6b 141DECLARE_STATS_COUNTER(cnt_unmap_sg);
c8f0fb36 142DECLARE_STATS_COUNTER(cnt_alloc_coherent);
5d31ee7e 143DECLARE_STATS_COUNTER(cnt_free_coherent);
c1858976 144DECLARE_STATS_COUNTER(cross_page);
f57d98ae 145DECLARE_STATS_COUNTER(domain_flush_single);
18811f55 146DECLARE_STATS_COUNTER(domain_flush_all);
5774f7c5 147DECLARE_STATS_COUNTER(alloced_io_mem);
8ecaf8f1 148DECLARE_STATS_COUNTER(total_map_requests);
da49f6df 149
7f26508b
JR
150static struct dentry *stats_dir;
151static struct dentry *de_isolate;
152static struct dentry *de_fflush;
153
154static void amd_iommu_stats_add(struct __iommu_counter *cnt)
155{
156 if (stats_dir == NULL)
157 return;
158
159 cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
160 &cnt->value);
161}
162
163static void amd_iommu_stats_init(void)
164{
165 stats_dir = debugfs_create_dir("amd-iommu", NULL);
166 if (stats_dir == NULL)
167 return;
168
169 de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
170 (u32 *)&amd_iommu_isolate);
171
172 de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
173 (u32 *)&amd_iommu_unmap_flush);
da49f6df
JR
174
175 amd_iommu_stats_add(&compl_wait);
0f2a86f2 176 amd_iommu_stats_add(&cnt_map_single);
146a6917 177 amd_iommu_stats_add(&cnt_unmap_single);
d03f067a 178 amd_iommu_stats_add(&cnt_map_sg);
55877a6b 179 amd_iommu_stats_add(&cnt_unmap_sg);
c8f0fb36 180 amd_iommu_stats_add(&cnt_alloc_coherent);
5d31ee7e 181 amd_iommu_stats_add(&cnt_free_coherent);
c1858976 182 amd_iommu_stats_add(&cross_page);
f57d98ae 183 amd_iommu_stats_add(&domain_flush_single);
18811f55 184 amd_iommu_stats_add(&domain_flush_all);
5774f7c5 185 amd_iommu_stats_add(&alloced_io_mem);
8ecaf8f1 186 amd_iommu_stats_add(&total_map_requests);
7f26508b
JR
187}
188
189#endif
190
a80dc3e0
JR
191/****************************************************************************
192 *
193 * Interrupt handling functions
194 *
195 ****************************************************************************/
196
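/* Dump the device table entry for @devid as eight 32-bit words */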
e3e59876
JR
197static void dump_dte_entry(u16 devid)
198{
199 int i;
200
201 for (i = 0; i < 8; ++i)
202 pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
203 amd_iommu_dev_table[devid].data[i]);
204}
205
945b4ac4
JR
206static void dump_command(unsigned long phys_addr)
207{
208 struct iommu_cmd *cmd = phys_to_virt(phys_addr);
209 int i;
210
211 for (i = 0; i < 4; ++i)
212 pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
213}
214
a345b23b 215static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
90008ee4
JR
216{
217 u32 *event = __evt;
218 int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
219 int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
220 int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
221 int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
222 u64 address = (u64)(((u64)event[3]) << 32) | event[2];
223
4c6f40d4 224 printk(KERN_ERR "AMD-Vi: Event logged [");
90008ee4
JR
225
226 switch (type) {
227 case EVENT_TYPE_ILL_DEV:
228 printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
229 "address=0x%016llx flags=0x%04x]\n",
230 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
231 address, flags);
e3e59876 232 dump_dte_entry(devid);
90008ee4
JR
233 break;
234 case EVENT_TYPE_IO_FAULT:
235 printk("IO_PAGE_FAULT device=%02x:%02x.%x "
236 "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
237 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
238 domid, address, flags);
239 break;
240 case EVENT_TYPE_DEV_TAB_ERR:
241 printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
242 "address=0x%016llx flags=0x%04x]\n",
243 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
244 address, flags);
245 break;
246 case EVENT_TYPE_PAGE_TAB_ERR:
247 printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
248 "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
249 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
250 domid, address, flags);
251 break;
252 case EVENT_TYPE_ILL_CMD:
253 printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
a345b23b 254 reset_iommu_command_buffer(iommu);
945b4ac4 255 dump_command(address);
90008ee4
JR
256 break;
257 case EVENT_TYPE_CMD_HARD_ERR:
258 printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
259 "flags=0x%04x]\n", address, flags);
260 break;
261 case EVENT_TYPE_IOTLB_INV_TO:
262 printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
263 "address=0x%016llx]\n",
264 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
265 address);
266 break;
267 case EVENT_TYPE_INV_DEV_REQ:
268 printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
269 "address=0x%016llx flags=0x%04x]\n",
270 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
271 address, flags);
272 break;
273 default:
274 printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
275 }
276}
277
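/* Drain the hardware event log and print every pending entry */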
278static void iommu_poll_events(struct amd_iommu *iommu)
279{
280 u32 head, tail;
281 unsigned long flags;
282
283 spin_lock_irqsave(&iommu->lock, flags);
284
285 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
286 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
287
288 while (head != tail) {
a345b23b 289 iommu_print_event(iommu, iommu->evt_buf + head);
90008ee4
JR
290 head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
291 }
292
293 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
294
295 spin_unlock_irqrestore(&iommu->lock, flags);
296}
297
a80dc3e0
JR
298irqreturn_t amd_iommu_int_handler(int irq, void *data)
299{
90008ee4
JR
300 struct amd_iommu *iommu;
301
3bd22172 302 for_each_iommu(iommu)
90008ee4
JR
303 iommu_poll_events(iommu);
304
305 return IRQ_HANDLED;
a80dc3e0
JR
306}
307
431b2a20
JR
308/****************************************************************************
309 *
310 * IOMMU command queuing functions
311 *
312 ****************************************************************************/
313
314/*
 315 * Writes the command to the IOMMU's command buffer and informs the
316 * hardware about the new command. Must be called with iommu->lock held.
317 */
d6449536 318static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
a19ae1ec
JR
319{
320 u32 tail, head;
321 u8 *target;
322
323 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
8a7c5ef3 324 target = iommu->cmd_buf + tail;
a19ae1ec
JR
325 memcpy_toio(target, cmd, sizeof(*cmd));
326 tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
327 head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
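 /* ring buffer full - the advanced tail would catch up with the head */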
328 if (tail == head)
329 return -ENOMEM;
330 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
331
332 return 0;
333}
334
431b2a20
JR
335/*
336 * General queuing function for commands. Takes iommu->lock and calls
337 * __iommu_queue_command().
338 */
d6449536 339static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
a19ae1ec
JR
340{
341 unsigned long flags;
342 int ret;
343
344 spin_lock_irqsave(&iommu->lock, flags);
345 ret = __iommu_queue_command(iommu, cmd);
09ee17eb 346 if (!ret)
0cfd7aa9 347 iommu->need_sync = true;
a19ae1ec
JR
348 spin_unlock_irqrestore(&iommu->lock, flags);
349
350 return ret;
351}
352
8d201968
JR
353/*
354 * This function waits until an IOMMU has completed a completion
355 * wait command
356 */
357static void __iommu_wait_for_completion(struct amd_iommu *iommu)
358{
359 int ready = 0;
360 unsigned status = 0;
361 unsigned long i = 0;
362
da49f6df
JR
363 INC_STATS_COUNTER(compl_wait);
364
8d201968
JR
365 while (!ready && (i < EXIT_LOOP_COUNT)) {
366 ++i;
367 /* wait for the bit to become one */
368 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
369 ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
370 }
371
372 /* set bit back to zero */
373 status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
374 writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
375
6a1eddd2
JR
376 if (unlikely(i == EXIT_LOOP_COUNT)) {
377 spin_unlock(&iommu->lock);
378 reset_iommu_command_buffer(iommu);
379 spin_lock(&iommu->lock);
380 }
8d201968
JR
381}
382
383/*
384 * This function queues a completion wait command into the command
385 * buffer of an IOMMU
386 */
387static int __iommu_completion_wait(struct amd_iommu *iommu)
388{
389 struct iommu_cmd cmd;
390
391 memset(&cmd, 0, sizeof(cmd));
392 cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
393 CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
394
395 return __iommu_queue_command(iommu, &cmd);
396}
397
431b2a20
JR
398/*
399 * This function is called whenever we need to ensure that the IOMMU has
400 * completed execution of all commands we sent. It sends a
401 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
402 * us about that by writing a value to a physical address we pass with
403 * the command.
404 */
a19ae1ec
JR
405static int iommu_completion_wait(struct amd_iommu *iommu)
406{
8d201968
JR
407 int ret = 0;
408 unsigned long flags;
a19ae1ec 409
7e4f88da
JR
410 spin_lock_irqsave(&iommu->lock, flags);
411
09ee17eb
JR
412 if (!iommu->need_sync)
413 goto out;
414
8d201968 415 ret = __iommu_completion_wait(iommu);
09ee17eb 416
0cfd7aa9 417 iommu->need_sync = false;
a19ae1ec
JR
418
419 if (ret)
7e4f88da 420 goto out;
a19ae1ec 421
8d201968 422 __iommu_wait_for_completion(iommu);
84df8175 423
7e4f88da
JR
424out:
425 spin_unlock_irqrestore(&iommu->lock, flags);
a19ae1ec
JR
426
427 return 0;
428}
429
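/* Wait for command completion on each IOMMU that holds devices of this domain */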
0518a3a4
JR
430static void iommu_flush_complete(struct protection_domain *domain)
431{
432 int i;
433
434 for (i = 0; i < amd_iommus_present; ++i) {
435 if (!domain->dev_iommu[i])
436 continue;
437
438 /*
439 * Devices of this domain are behind this IOMMU
440 * We need to wait for completion of all commands.
441 */
442 iommu_completion_wait(amd_iommus[i]);
443 }
444}
445
431b2a20
JR
446/*
447 * Command send function for invalidating a device table entry
448 */
a19ae1ec
JR
449static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
450{
d6449536 451 struct iommu_cmd cmd;
ee2fa743 452 int ret;
a19ae1ec
JR
453
454 BUG_ON(iommu == NULL);
455
456 memset(&cmd, 0, sizeof(cmd));
457 CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
458 cmd.data[0] = devid;
459
ee2fa743
JR
460 ret = iommu_queue_command(iommu, &cmd);
461
ee2fa743 462 return ret;
a19ae1ec
JR
463}
464
237b6f33
JR
465static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
466 u16 domid, int pde, int s)
467{
468 memset(cmd, 0, sizeof(*cmd));
469 address &= PAGE_MASK;
470 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
471 cmd->data[1] |= domid;
472 cmd->data[2] = lower_32_bits(address);
473 cmd->data[3] = upper_32_bits(address);
474 if (s) /* size bit - we flush more than one 4kb page */
475 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 476 if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
477 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
478}
479
431b2a20
JR
480/*
 481 * Generic command send function for invalidating TLB entries
482 */
a19ae1ec
JR
483static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
484 u64 address, u16 domid, int pde, int s)
485{
d6449536 486 struct iommu_cmd cmd;
ee2fa743 487 int ret;
a19ae1ec 488
237b6f33 489 __iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s);
a19ae1ec 490
ee2fa743
JR
491 ret = iommu_queue_command(iommu, &cmd);
492
ee2fa743 493 return ret;
a19ae1ec
JR
494}
495
431b2a20
JR
496/*
497 * TLB invalidation function which is called from the mapping functions.
498 * It invalidates a single PTE if the range to flush is within a single
499 * page. Otherwise it flushes the whole TLB of the IOMMU.
500 */
6de8ad9b
JR
501static void __iommu_flush_pages(struct protection_domain *domain,
502 u64 address, size_t size, int pde)
a19ae1ec 503{
6de8ad9b 504 int s = 0, i;
dcd1e92e 505 unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE);
a19ae1ec
JR
506
507 address &= PAGE_MASK;
508
999ba417
JR
509 if (pages > 1) {
510 /*
511 * If we have to flush more than one page, flush all
512 * TLB entries for this domain
513 */
514 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
515 s = 1;
a19ae1ec
JR
516 }
517
999ba417 518
6de8ad9b
JR
519 for (i = 0; i < amd_iommus_present; ++i) {
520 if (!domain->dev_iommu[i])
521 continue;
522
523 /*
524 * Devices of this domain are behind this IOMMU
525 * We need a TLB flush
526 */
527 iommu_queue_inv_iommu_pages(amd_iommus[i], address,
528 domain->id, pde, s);
529 }
530
531 return;
532}
533
534static void iommu_flush_pages(struct protection_domain *domain,
535 u64 address, size_t size)
536{
537 __iommu_flush_pages(domain, address, size, 0);
a19ae1ec 538}
b6c02715 539
1c655773 540/* Flush the whole IO/TLB for a given protection domain */
dcd1e92e 541static void iommu_flush_tlb(struct protection_domain *domain)
1c655773 542{
dcd1e92e 543 __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
1c655773
JR
544}
545
42a49f96 546/* Flush the whole IO/TLB for a given protection domain - including PDE */
dcd1e92e 547static void iommu_flush_tlb_pde(struct protection_domain *domain)
42a49f96 548{
dcd1e92e 549 __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
42a49f96
CW
550}
551
43f49609 552/*
09b42804 553 * This function flushes all domains that have devices on the given IOMMU
43f49609 554 */
09b42804 555static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
43f49609 556{
09b42804
JR
557 u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
558 struct protection_domain *domain;
e394d72a 559 unsigned long flags;
18811f55 560
09b42804 561 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
bfd1be18 562
09b42804
JR
563 list_for_each_entry(domain, &amd_iommu_pd_list, list) {
564 if (domain->dev_iommu[iommu->index] == 0)
bfd1be18 565 continue;
09b42804
JR
566
567 spin_lock(&domain->lock);
568 iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
569 iommu_flush_complete(domain);
570 spin_unlock(&domain->lock);
bfd1be18 571 }
e394d72a 572
09b42804 573 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
e394d72a
JR
574}
575
09b42804
JR
576/*
577 * This function uses heavy locking and may disable irqs for some time. But
578 * this is no issue because it is only called during resume.
579 */
bfd1be18 580void amd_iommu_flush_all_domains(void)
e394d72a 581{
e3306664 582 struct protection_domain *domain;
09b42804
JR
583 unsigned long flags;
584
585 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
e394d72a 586
e3306664 587 list_for_each_entry(domain, &amd_iommu_pd_list, list) {
09b42804 588 spin_lock(&domain->lock);
e3306664
JR
589 iommu_flush_tlb_pde(domain);
590 iommu_flush_complete(domain);
09b42804 591 spin_unlock(&domain->lock);
e3306664 592 }
09b42804
JR
593
594 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
bfd1be18
JR
595}
596
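/* Invalidate the device table entries of all devices translated by this IOMMU */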
d586d785 597static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
bfd1be18
JR
598{
599 int i;
600
d586d785
JR
601 for (i = 0; i <= amd_iommu_last_bdf; ++i) {
602 if (iommu != amd_iommu_rlookup_table[i])
bfd1be18 603 continue;
d586d785
JR
604
605 iommu_queue_inv_dev_entry(iommu, i);
606 iommu_completion_wait(iommu);
bfd1be18
JR
607 }
608}
609
6a0dbcbe 610static void flush_devices_by_domain(struct protection_domain *domain)
7d7a110c
JR
611{
612 struct amd_iommu *iommu;
613 int i;
614
615 for (i = 0; i <= amd_iommu_last_bdf; ++i) {
6a0dbcbe
JR
616 if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
617 (amd_iommu_pd_table[i] != domain))
7d7a110c
JR
618 continue;
619
620 iommu = amd_iommu_rlookup_table[i];
621 if (!iommu)
622 continue;
623
624 iommu_queue_inv_dev_entry(iommu, i);
625 iommu_completion_wait(iommu);
626 }
627}
628
a345b23b
JR
629static void reset_iommu_command_buffer(struct amd_iommu *iommu)
630{
631 pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
632
b26e81b8
JR
633 if (iommu->reset_in_progress)
634 panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
635
636 iommu->reset_in_progress = true;
637
a345b23b
JR
638 amd_iommu_reset_cmd_buffer(iommu);
639 flush_all_devices_for_iommu(iommu);
640 flush_all_domains_on_iommu(iommu);
b26e81b8
JR
641
642 iommu->reset_in_progress = false;
a345b23b
JR
643}
644
6a0dbcbe
JR
645void amd_iommu_flush_all_devices(void)
646{
647 flush_devices_by_domain(NULL);
648}
649
431b2a20
JR
650/****************************************************************************
651 *
 652 * The functions below are used to create the page table mappings for
653 * unity mapped regions.
654 *
655 ****************************************************************************/
656
308973d3
JR
657/*
658 * This function is used to add another level to an IO page table. Adding
659 * another level increases the size of the address space by 9 bits to a size up
660 * to 64 bits.
661 */
662static bool increase_address_space(struct protection_domain *domain,
663 gfp_t gfp)
664{
665 u64 *pte;
666
667 if (domain->mode == PAGE_MODE_6_LEVEL)
668 /* address space already 64 bit large */
669 return false;
670
671 pte = (void *)get_zeroed_page(gfp);
672 if (!pte)
673 return false;
674
675 *pte = PM_LEVEL_PDE(domain->mode,
676 virt_to_phys(domain->pt_root));
677 domain->pt_root = pte;
678 domain->mode += 1;
679 domain->updated = true;
680
681 return true;
682}
683
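/*
 * Walk the page table for @address down to @end_lvl, allocating missing
 * intermediate levels on the way, and return a pointer to the PTE
 * (or NULL if a level could not be allocated).
 */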
684static u64 *alloc_pte(struct protection_domain *domain,
685 unsigned long address,
686 int end_lvl,
687 u64 **pte_page,
688 gfp_t gfp)
689{
690 u64 *pte, *page;
691 int level;
692
693 while (address > PM_LEVEL_SIZE(domain->mode))
694 increase_address_space(domain, gfp);
695
696 level = domain->mode - 1;
697 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
698
699 while (level > end_lvl) {
700 if (!IOMMU_PTE_PRESENT(*pte)) {
701 page = (u64 *)get_zeroed_page(gfp);
702 if (!page)
703 return NULL;
704 *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
705 }
706
707 level -= 1;
708
709 pte = IOMMU_PTE_PAGE(*pte);
710
711 if (pte_page && level == end_lvl)
712 *pte_page = pte;
713
714 pte = &pte[PM_LEVEL_INDEX(level, address)];
715 }
716
717 return pte;
718}
719
720/*
721 * This function checks if there is a PTE for a given dma address. If
722 * there is one, it returns the pointer to it.
723 */
724static u64 *fetch_pte(struct protection_domain *domain,
725 unsigned long address, int map_size)
726{
727 int level;
728 u64 *pte;
729
730 level = domain->mode - 1;
731 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
732
733 while (level > map_size) {
734 if (!IOMMU_PTE_PRESENT(*pte))
735 return NULL;
736
737 level -= 1;
738
739 pte = IOMMU_PTE_PAGE(*pte);
740 pte = &pte[PM_LEVEL_INDEX(level, address)];
741
742 if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
743 pte = NULL;
744 break;
745 }
746 }
747
748 return pte;
749}
750
431b2a20
JR
751/*
 752 * Generic mapping function. It maps a physical address into a DMA
753 * address space. It allocates the page table pages if necessary.
754 * In the future it can be extended to a generic mapping function
755 * supporting all features of AMD IOMMU page tables like level skipping
756 * and full 64 bit address spaces.
757 */
38e817fe
JR
758static int iommu_map_page(struct protection_domain *dom,
759 unsigned long bus_addr,
760 unsigned long phys_addr,
abdc5eb3
JR
761 int prot,
762 int map_size)
bd0e5211 763{
8bda3092 764 u64 __pte, *pte;
bd0e5211
JR
765
766 bus_addr = PAGE_ALIGN(bus_addr);
bb9d4ff8 767 phys_addr = PAGE_ALIGN(phys_addr);
bd0e5211 768
abdc5eb3
JR
769 BUG_ON(!PM_ALIGNED(map_size, bus_addr));
770 BUG_ON(!PM_ALIGNED(map_size, phys_addr));
771
bad1cac2 772 if (!(prot & IOMMU_PROT_MASK))
bd0e5211
JR
773 return -EINVAL;
774
abdc5eb3 775 pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL);
bd0e5211
JR
776
777 if (IOMMU_PTE_PRESENT(*pte))
778 return -EBUSY;
779
780 __pte = phys_addr | IOMMU_PTE_P;
781 if (prot & IOMMU_PROT_IR)
782 __pte |= IOMMU_PTE_IR;
783 if (prot & IOMMU_PROT_IW)
784 __pte |= IOMMU_PTE_IW;
785
786 *pte = __pte;
787
04bfdd84
JR
788 update_domain(dom);
789
bd0e5211
JR
790 return 0;
791}
792
eb74ff6c 793static void iommu_unmap_page(struct protection_domain *dom,
a6b256b4 794 unsigned long bus_addr, int map_size)
eb74ff6c 795{
a6b256b4 796 u64 *pte = fetch_pte(dom, bus_addr, map_size);
eb74ff6c 797
38a76eee
JR
798 if (pte)
799 *pte = 0;
eb74ff6c 800}
eb74ff6c 801
431b2a20
JR
802/*
803 * This function checks if a specific unity mapping entry is needed for
804 * this specific IOMMU.
805 */
bd0e5211
JR
806static int iommu_for_unity_map(struct amd_iommu *iommu,
807 struct unity_map_entry *entry)
808{
809 u16 bdf, i;
810
811 for (i = entry->devid_start; i <= entry->devid_end; ++i) {
812 bdf = amd_iommu_alias_table[i];
813 if (amd_iommu_rlookup_table[bdf] == iommu)
814 return 1;
815 }
816
817 return 0;
818}
819
431b2a20
JR
820/*
821 * This function actually applies the mapping to the page table of the
822 * dma_ops domain.
823 */
bd0e5211
JR
824static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
825 struct unity_map_entry *e)
826{
827 u64 addr;
828 int ret;
829
830 for (addr = e->address_start; addr < e->address_end;
831 addr += PAGE_SIZE) {
abdc5eb3
JR
832 ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
833 PM_MAP_4k);
bd0e5211
JR
834 if (ret)
835 return ret;
836 /*
837 * if unity mapping is in aperture range mark the page
838 * as allocated in the aperture
839 */
840 if (addr < dma_dom->aperture_size)
c3239567 841 __set_bit(addr >> PAGE_SHIFT,
384de729 842 dma_dom->aperture[0]->bitmap);
bd0e5211
JR
843 }
844
845 return 0;
846}
847
171e7b37
JR
848/*
849 * Init the unity mappings for a specific IOMMU in the system
850 *
851 * Basically iterates over all unity mapping entries and applies them to
852 * the default domain DMA of that IOMMU if necessary.
853 */
854static int iommu_init_unity_mappings(struct amd_iommu *iommu)
855{
856 struct unity_map_entry *entry;
857 int ret;
858
859 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
860 if (!iommu_for_unity_map(iommu, entry))
861 continue;
862 ret = dma_ops_unity_map(iommu->default_dom, entry);
863 if (ret)
864 return ret;
865 }
866
867 return 0;
868}
869
431b2a20
JR
870/*
871 * Inits the unity mappings required for a specific device
872 */
bd0e5211
JR
873static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
874 u16 devid)
875{
876 struct unity_map_entry *e;
877 int ret;
878
879 list_for_each_entry(e, &amd_iommu_unity_map, list) {
880 if (!(devid >= e->devid_start && devid <= e->devid_end))
881 continue;
882 ret = dma_ops_unity_map(dma_dom, e);
883 if (ret)
884 return ret;
885 }
886
887 return 0;
888}
889
431b2a20
JR
890/****************************************************************************
891 *
892 * The next functions belong to the address allocator for the dma_ops
893 * interface functions. They work like the allocators in the other IOMMU
 894 * drivers. It's basically a bitmap which marks the allocated pages in
895 * the aperture. Maybe it could be enhanced in the future to a more
896 * efficient allocator.
897 *
898 ****************************************************************************/
d3086444 899
431b2a20 900/*
384de729 901 * The address allocator core functions.
431b2a20
JR
902 *
903 * called with domain->lock held
904 */
384de729 905
171e7b37
JR
906/*
907 * Used to reserve address ranges in the aperture (e.g. for exclusion
 908 * ranges).
909 */
910static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
911 unsigned long start_page,
912 unsigned int pages)
913{
914 unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
915
916 if (start_page + pages > last_page)
917 pages = last_page - start_page;
918
919 for (i = start_page; i < start_page + pages; ++i) {
920 int index = i / APERTURE_RANGE_PAGES;
921 int page = i % APERTURE_RANGE_PAGES;
922 __set_bit(page, dom->aperture[index]->bitmap);
923 }
924}
925
9cabe89b
JR
926/*
927 * This function is used to add a new aperture range to an existing
928 * aperture in case of dma_ops domain allocation or address allocation
929 * failure.
930 */
576175c2 931static int alloc_new_range(struct dma_ops_domain *dma_dom,
9cabe89b
JR
932 bool populate, gfp_t gfp)
933{
934 int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
576175c2 935 struct amd_iommu *iommu;
00cd122a 936 int i;
9cabe89b 937
f5e9705c
JR
938#ifdef CONFIG_IOMMU_STRESS
939 populate = false;
940#endif
941
9cabe89b
JR
942 if (index >= APERTURE_MAX_RANGES)
943 return -ENOMEM;
944
945 dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
946 if (!dma_dom->aperture[index])
947 return -ENOMEM;
948
949 dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
950 if (!dma_dom->aperture[index]->bitmap)
951 goto out_free;
952
953 dma_dom->aperture[index]->offset = dma_dom->aperture_size;
954
955 if (populate) {
956 unsigned long address = dma_dom->aperture_size;
957 int i, num_ptes = APERTURE_RANGE_PAGES / 512;
958 u64 *pte, *pte_page;
959
960 for (i = 0; i < num_ptes; ++i) {
abdc5eb3 961 pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k,
9cabe89b
JR
962 &pte_page, gfp);
963 if (!pte)
964 goto out_free;
965
966 dma_dom->aperture[index]->pte_pages[i] = pte_page;
967
968 address += APERTURE_RANGE_SIZE / 64;
969 }
970 }
971
972 dma_dom->aperture_size += APERTURE_RANGE_SIZE;
973
00cd122a 974 /* Initialize the exclusion range if necessary */
576175c2
JR
975 for_each_iommu(iommu) {
976 if (iommu->exclusion_start &&
977 iommu->exclusion_start >= dma_dom->aperture[index]->offset
978 && iommu->exclusion_start < dma_dom->aperture_size) {
979 unsigned long startpage;
980 int pages = iommu_num_pages(iommu->exclusion_start,
981 iommu->exclusion_length,
982 PAGE_SIZE);
983 startpage = iommu->exclusion_start >> PAGE_SHIFT;
984 dma_ops_reserve_addresses(dma_dom, startpage, pages);
985 }
00cd122a
JR
986 }
987
988 /*
989 * Check for areas already mapped as present in the new aperture
990 * range and mark those pages as reserved in the allocator. Such
991 * mappings may already exist as a result of requested unity
992 * mappings for devices.
993 */
994 for (i = dma_dom->aperture[index]->offset;
995 i < dma_dom->aperture_size;
996 i += PAGE_SIZE) {
a6b256b4 997 u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k);
00cd122a
JR
998 if (!pte || !IOMMU_PTE_PRESENT(*pte))
999 continue;
1000
1001 dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
1002 }
1003
04bfdd84
JR
1004 update_domain(&dma_dom->domain);
1005
9cabe89b
JR
1006 return 0;
1007
1008out_free:
04bfdd84
JR
1009 update_domain(&dma_dom->domain);
1010
9cabe89b
JR
1011 free_page((unsigned long)dma_dom->aperture[index]->bitmap);
1012
1013 kfree(dma_dom->aperture[index]);
1014 dma_dom->aperture[index] = NULL;
1015
1016 return -ENOMEM;
1017}
1018
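/*
 * Scan the aperture ranges, starting at @start, for a run of @pages free
 * pages below @dma_mask that also respects the device's segment boundary.
 */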
384de729
JR
1019static unsigned long dma_ops_area_alloc(struct device *dev,
1020 struct dma_ops_domain *dom,
1021 unsigned int pages,
1022 unsigned long align_mask,
1023 u64 dma_mask,
1024 unsigned long start)
1025{
803b8cb4 1026 unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
384de729
JR
1027 int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
1028 int i = start >> APERTURE_RANGE_SHIFT;
1029 unsigned long boundary_size;
1030 unsigned long address = -1;
1031 unsigned long limit;
1032
803b8cb4
JR
1033 next_bit >>= PAGE_SHIFT;
1034
384de729
JR
1035 boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
1036 PAGE_SIZE) >> PAGE_SHIFT;
1037
1038 for (;i < max_index; ++i) {
1039 unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
1040
1041 if (dom->aperture[i]->offset >= dma_mask)
1042 break;
1043
1044 limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
1045 dma_mask >> PAGE_SHIFT);
1046
1047 address = iommu_area_alloc(dom->aperture[i]->bitmap,
1048 limit, next_bit, pages, 0,
1049 boundary_size, align_mask);
1050 if (address != -1) {
1051 address = dom->aperture[i]->offset +
1052 (address << PAGE_SHIFT);
803b8cb4 1053 dom->next_address = address + (pages << PAGE_SHIFT);
384de729
JR
1054 break;
1055 }
1056
1057 next_bit = 0;
1058 }
1059
1060 return address;
1061}
1062
d3086444
JR
1063static unsigned long dma_ops_alloc_addresses(struct device *dev,
1064 struct dma_ops_domain *dom,
6d4f343f 1065 unsigned int pages,
832a90c3
JR
1066 unsigned long align_mask,
1067 u64 dma_mask)
d3086444 1068{
d3086444 1069 unsigned long address;
d3086444 1070
fe16f088
JR
1071#ifdef CONFIG_IOMMU_STRESS
1072 dom->next_address = 0;
1073 dom->need_flush = true;
1074#endif
d3086444 1075
384de729 1076 address = dma_ops_area_alloc(dev, dom, pages, align_mask,
803b8cb4 1077 dma_mask, dom->next_address);
d3086444 1078
1c655773 1079 if (address == -1) {
803b8cb4 1080 dom->next_address = 0;
384de729
JR
1081 address = dma_ops_area_alloc(dev, dom, pages, align_mask,
1082 dma_mask, 0);
1c655773
JR
1083 dom->need_flush = true;
1084 }
d3086444 1085
384de729 1086 if (unlikely(address == -1))
8fd524b3 1087 address = DMA_ERROR_CODE;
d3086444
JR
1088
1089 WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
1090
1091 return address;
1092}
1093
431b2a20
JR
1094/*
1095 * The address free function.
1096 *
1097 * called with domain->lock held
1098 */
d3086444
JR
1099static void dma_ops_free_addresses(struct dma_ops_domain *dom,
1100 unsigned long address,
1101 unsigned int pages)
1102{
384de729
JR
1103 unsigned i = address >> APERTURE_RANGE_SHIFT;
1104 struct aperture_range *range = dom->aperture[i];
80be308d 1105
384de729
JR
1106 BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
1107
47bccd6b
JR
1108#ifdef CONFIG_IOMMU_STRESS
1109 if (i < 4)
1110 return;
1111#endif
80be308d 1112
803b8cb4 1113 if (address >= dom->next_address)
80be308d 1114 dom->need_flush = true;
384de729
JR
1115
1116 address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
803b8cb4 1117
384de729
JR
1118 iommu_area_free(range->bitmap, address, pages);
1119
d3086444
JR
1120}
1121
431b2a20
JR
1122/****************************************************************************
1123 *
1124 * The next functions belong to the domain allocation. A domain is
1125 * allocated for every IOMMU as the default domain. If device isolation
 1126 * is enabled, every device gets its own domain. The most important thing
1127 * about domains is the page table mapping the DMA address space they
1128 * contain.
1129 *
1130 ****************************************************************************/
1131
aeb26f55
JR
1132/*
1133 * This function adds a protection domain to the global protection domain list
1134 */
1135static void add_domain_to_list(struct protection_domain *domain)
1136{
1137 unsigned long flags;
1138
1139 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1140 list_add(&domain->list, &amd_iommu_pd_list);
1141 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1142}
1143
1144/*
 1145 * This function removes a protection domain from the global
1146 * protection domain list
1147 */
1148static void del_domain_from_list(struct protection_domain *domain)
1149{
1150 unsigned long flags;
1151
1152 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1153 list_del(&domain->list);
1154 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1155}
1156
ec487d1a
JR
1157static u16 domain_id_alloc(void)
1158{
1159 unsigned long flags;
1160 int id;
1161
1162 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1163 id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1164 BUG_ON(id == 0);
1165 if (id > 0 && id < MAX_DOMAIN_ID)
1166 __set_bit(id, amd_iommu_pd_alloc_bitmap);
1167 else
1168 id = 0;
1169 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1170
1171 return id;
1172}
1173
a2acfb75
JR
1174static void domain_id_free(int id)
1175{
1176 unsigned long flags;
1177
1178 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1179 if (id > 0 && id < MAX_DOMAIN_ID)
1180 __clear_bit(id, amd_iommu_pd_alloc_bitmap);
1181 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1182}
a2acfb75 1183
86db2e5d 1184static void free_pagetable(struct protection_domain *domain)
ec487d1a
JR
1185{
1186 int i, j;
1187 u64 *p1, *p2, *p3;
1188
86db2e5d 1189 p1 = domain->pt_root;
ec487d1a
JR
1190
1191 if (!p1)
1192 return;
1193
1194 for (i = 0; i < 512; ++i) {
1195 if (!IOMMU_PTE_PRESENT(p1[i]))
1196 continue;
1197
1198 p2 = IOMMU_PTE_PAGE(p1[i]);
3cc3d84b 1199 for (j = 0; j < 512; ++j) {
ec487d1a
JR
1200 if (!IOMMU_PTE_PRESENT(p2[j]))
1201 continue;
1202 p3 = IOMMU_PTE_PAGE(p2[j]);
1203 free_page((unsigned long)p3);
1204 }
1205
1206 free_page((unsigned long)p2);
1207 }
1208
1209 free_page((unsigned long)p1);
86db2e5d
JR
1210
1211 domain->pt_root = NULL;
ec487d1a
JR
1212}
1213
431b2a20
JR
1214/*
1215 * Free a domain, only used if something went wrong in the
1216 * allocation path and we need to free an already allocated page table
1217 */
ec487d1a
JR
1218static void dma_ops_domain_free(struct dma_ops_domain *dom)
1219{
384de729
JR
1220 int i;
1221
ec487d1a
JR
1222 if (!dom)
1223 return;
1224
aeb26f55
JR
1225 del_domain_from_list(&dom->domain);
1226
86db2e5d 1227 free_pagetable(&dom->domain);
ec487d1a 1228
384de729
JR
1229 for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
1230 if (!dom->aperture[i])
1231 continue;
1232 free_page((unsigned long)dom->aperture[i]->bitmap);
1233 kfree(dom->aperture[i]);
1234 }
ec487d1a
JR
1235
1236 kfree(dom);
1237}
1238
431b2a20
JR
1239/*
1240 * Allocates a new protection domain usable for the dma_ops functions.
 1241 * It also initializes the page table and the address allocator data
1242 * structures required for the dma_ops interface
1243 */
87a64d52 1244static struct dma_ops_domain *dma_ops_domain_alloc(void)
ec487d1a
JR
1245{
1246 struct dma_ops_domain *dma_dom;
ec487d1a
JR
1247
1248 dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
1249 if (!dma_dom)
1250 return NULL;
1251
1252 spin_lock_init(&dma_dom->domain.lock);
1253
1254 dma_dom->domain.id = domain_id_alloc();
1255 if (dma_dom->domain.id == 0)
1256 goto free_dma_dom;
8f7a017c 1257 dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
ec487d1a 1258 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
9fdb19d6 1259 dma_dom->domain.flags = PD_DMA_OPS_MASK;
ec487d1a
JR
1260 dma_dom->domain.priv = dma_dom;
1261 if (!dma_dom->domain.pt_root)
1262 goto free_dma_dom;
ec487d1a 1263
1c655773 1264 dma_dom->need_flush = false;
bd60b735 1265 dma_dom->target_dev = 0xffff;
1c655773 1266
aeb26f55
JR
1267 add_domain_to_list(&dma_dom->domain);
1268
576175c2 1269 if (alloc_new_range(dma_dom, true, GFP_KERNEL))
ec487d1a 1270 goto free_dma_dom;
ec487d1a 1271
431b2a20 1272 /*
ec487d1a
JR
1273 * mark the first page as allocated so we never return 0 as
1274 * a valid dma-address. So we can use 0 as error value
431b2a20 1275 */
384de729 1276 dma_dom->aperture[0]->bitmap[0] = 1;
803b8cb4 1277 dma_dom->next_address = 0;
ec487d1a 1278
ec487d1a
JR
1279
1280 return dma_dom;
1281
1282free_dma_dom:
1283 dma_ops_domain_free(dma_dom);
1284
1285 return NULL;
1286}
1287
5b28df6f
JR
1288/*
1289 * little helper function to check whether a given protection domain is a
1290 * dma_ops domain
1291 */
1292static bool dma_ops_domain(struct protection_domain *domain)
1293{
1294 return domain->flags & PD_DMA_OPS_MASK;
1295}
1296
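/*
 * Write the domain's page table root, paging mode and id into the device
 * table entry for @devid, take the reference counts and flush the entry.
 */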
407d733e 1297static void set_dte_entry(u16 devid, struct protection_domain *domain)
b20ac0d4 1298{
15898bbc 1299 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
b20ac0d4 1300 u64 pte_root = virt_to_phys(domain->pt_root);
863c74eb 1301
15898bbc
JR
1302 BUG_ON(amd_iommu_pd_table[devid] != NULL);
1303
38ddf41b
JR
1304 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
1305 << DEV_ENTRY_MODE_SHIFT;
1306 pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
b20ac0d4 1307
b20ac0d4 1308 amd_iommu_dev_table[devid].data[2] = domain->id;
aa879fff
JR
1309 amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
1310 amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
b20ac0d4
JR
1311
1312 amd_iommu_pd_table[devid] = domain;
15898bbc
JR
1313
1314 /* Do reference counting */
1315 domain->dev_iommu[iommu->index] += 1;
1316 domain->dev_cnt += 1;
1317
 1318 /* Flush the changed DTE entry */
1319 iommu_queue_inv_dev_entry(iommu, devid);
1320}
1321
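/*
 * Detach @devid from its protection domain: restore a default device table
 * entry, drop the reference counts and flush the entry.
 */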
1322static void clear_dte_entry(u16 devid)
1323{
1324 struct protection_domain *domain = amd_iommu_pd_table[devid];
1325 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1326
1327 BUG_ON(domain == NULL);
1328
1329 /* remove domain from the lookup table */
1330 amd_iommu_pd_table[devid] = NULL;
1331
1332 /* remove entry from the device table seen by the hardware */
1333 amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
1334 amd_iommu_dev_table[devid].data[1] = 0;
1335 amd_iommu_dev_table[devid].data[2] = 0;
1336
1337 amd_iommu_apply_erratum_63(devid);
1338
1339 /* decrease reference counters */
1340 domain->dev_iommu[iommu->index] -= 1;
1341 domain->dev_cnt -= 1;
1342
1343 iommu_queue_inv_dev_entry(iommu, devid);
2b681faf
JR
1344}
1345
1346/*
 1347 * If a device is not yet associated with a domain, this function
 1348 * assigns it to the domain and makes it visible to the hardware
1349 */
15898bbc
JR
1350static int __attach_device(struct device *dev,
1351 struct protection_domain *domain)
2b681faf 1352{
15898bbc
JR
1353 u16 devid = get_device_id(dev);
1354 u16 alias = amd_iommu_alias_table[devid];
1355
2b681faf
JR
1356 /* lock domain */
1357 spin_lock(&domain->lock);
1358
15898bbc
JR
1359 /* Some sanity checks */
1360 if (amd_iommu_pd_table[alias] != NULL &&
1361 amd_iommu_pd_table[alias] != domain)
1362 return -EBUSY;
eba6ac60 1363
15898bbc
JR
1364 if (amd_iommu_pd_table[devid] != NULL &&
1365 amd_iommu_pd_table[devid] != domain)
1366 return -EBUSY;
1367
1368 /* Do real assignment */
1369 if (alias != devid &&
1370 amd_iommu_pd_table[alias] == NULL)
1371 set_dte_entry(alias, domain);
1372
1373 if (amd_iommu_pd_table[devid] == NULL)
1374 set_dte_entry(devid, domain);
eba6ac60
JR
1375
1376 /* ready */
1377 spin_unlock(&domain->lock);
15898bbc
JR
1378
1379 return 0;
0feae533 1380}
b20ac0d4 1381
407d733e
JR
1382/*
 1383 * If a device is not yet associated with a domain, this function
 1384 * assigns it to the domain and makes it visible to the hardware
1385 */
15898bbc
JR
1386static int attach_device(struct device *dev,
1387 struct protection_domain *domain)
0feae533 1388{
eba6ac60 1389 unsigned long flags;
15898bbc 1390 int ret;
eba6ac60
JR
1391
1392 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
15898bbc 1393 ret = __attach_device(dev, domain);
b20ac0d4
JR
1394 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1395
0feae533
JR
1396 /*
1397 * We might boot into a crash-kernel here. The crashed kernel
1398 * left the caches in the IOMMU dirty. So we have to flush
1399 * here to evict all dirty stuff.
1400 */
dcd1e92e 1401 iommu_flush_tlb_pde(domain);
15898bbc
JR
1402
1403 return ret;
b20ac0d4
JR
1404}
1405
355bf553
JR
1406/*
1407 * Removes a device from a protection domain (unlocked)
1408 */
15898bbc 1409static void __detach_device(struct device *dev)
355bf553 1410{
15898bbc 1411 u16 devid = get_device_id(dev);
c4596114
JR
1412 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1413
1414 BUG_ON(!iommu);
355bf553 1415
15898bbc 1416 clear_dte_entry(devid);
21129f78
JR
1417
1418 /*
1419 * If we run in passthrough mode the device must be assigned to the
1420 * passthrough domain if it is detached from any other domain
1421 */
15898bbc
JR
1422 if (iommu_pass_through)
1423 __attach_device(dev, pt_domain);
355bf553
JR
1424}
1425
1426/*
1427 * Removes a device from a protection domain (with devtable_lock held)
1428 */
15898bbc 1429static void detach_device(struct device *dev)
355bf553
JR
1430{
1431 unsigned long flags;
1432
1433 /* lock device table */
1434 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
15898bbc 1435 __detach_device(dev);
355bf553
JR
1436 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1437}
e275a2a0 1438
15898bbc
JR
1439/*
1440 * Find out the protection domain structure for a given PCI device. This
1441 * will give us the pointer to the page table root for example.
1442 */
1443static struct protection_domain *domain_for_device(struct device *dev)
1444{
1445 struct protection_domain *dom;
1446 unsigned long flags;
1447 u16 devid, alias;
1448
1449 devid = get_device_id(dev);
1450 alias = amd_iommu_alias_table[devid];
1451
1452 read_lock_irqsave(&amd_iommu_devtable_lock, flags);
1453 dom = amd_iommu_pd_table[devid];
1454 if (dom == NULL &&
1455 amd_iommu_pd_table[alias] != NULL) {
1456 __attach_device(dev, amd_iommu_pd_table[alias]);
1457 dom = amd_iommu_pd_table[devid];
1458 }
1459
1460 read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1461
1462 return dom;
1463}
1464
e275a2a0
JR
1465static int device_change_notifier(struct notifier_block *nb,
1466 unsigned long action, void *data)
1467{
1468 struct device *dev = data;
98fc5a69 1469 u16 devid;
e275a2a0
JR
1470 struct protection_domain *domain;
1471 struct dma_ops_domain *dma_domain;
1472 struct amd_iommu *iommu;
1ac4cbbc 1473 unsigned long flags;
e275a2a0 1474
98fc5a69
JR
1475 if (!check_device(dev))
1476 return 0;
e275a2a0 1477
98fc5a69
JR
1478 devid = get_device_id(dev);
1479 iommu = amd_iommu_rlookup_table[devid];
15898bbc 1480 domain = domain_for_device(dev);
e275a2a0
JR
1481
1482 if (domain && !dma_ops_domain(domain))
1483 WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
1484 "to a non-dma-ops domain\n", dev_name(dev));
1485
1486 switch (action) {
c1eee67b 1487 case BUS_NOTIFY_UNBOUND_DRIVER:
e275a2a0
JR
1488 if (!domain)
1489 goto out;
a1ca331c
JR
1490 if (iommu_pass_through)
1491 break;
15898bbc 1492 detach_device(dev);
1ac4cbbc
JR
1493 break;
1494 case BUS_NOTIFY_ADD_DEVICE:
1495 /* allocate a protection domain if a device is added */
1496 dma_domain = find_protection_domain(devid);
1497 if (dma_domain)
1498 goto out;
87a64d52 1499 dma_domain = dma_ops_domain_alloc();
1ac4cbbc
JR
1500 if (!dma_domain)
1501 goto out;
1502 dma_domain->target_dev = devid;
1503
1504 spin_lock_irqsave(&iommu_pd_list_lock, flags);
1505 list_add_tail(&dma_domain->list, &iommu_pd_list);
1506 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
1507
e275a2a0
JR
1508 break;
1509 default:
1510 goto out;
1511 }
1512
1513 iommu_queue_inv_dev_entry(iommu, devid);
1514 iommu_completion_wait(iommu);
1515
1516out:
1517 return 0;
1518}
1519
b25ae679 1520static struct notifier_block device_nb = {
e275a2a0
JR
1521 .notifier_call = device_change_notifier,
1522};
355bf553 1523
431b2a20
JR
1524/*****************************************************************************
1525 *
1526 * The next functions belong to the dma_ops mapping/unmapping code.
1527 *
1528 *****************************************************************************/
1529
1530/*
1531 * In the dma_ops path we only have the struct device. This function
1532 * finds the corresponding IOMMU, the protection domain and the
1533 * requestor id for a given device.
1534 * If the device is not yet associated with a domain this is also done
1535 * in this function.
1536 */
94f6d190 1537static struct protection_domain *get_domain(struct device *dev)
b20ac0d4 1538{
94f6d190 1539 struct protection_domain *domain;
b20ac0d4 1540 struct dma_ops_domain *dma_dom;
94f6d190 1541 u16 devid = get_device_id(dev);
b20ac0d4 1542
f99c0f1c 1543 if (!check_device(dev))
94f6d190 1544 return ERR_PTR(-EINVAL);
b20ac0d4 1545
94f6d190
JR
1546 domain = domain_for_device(dev);
1547 if (domain != NULL && !dma_ops_domain(domain))
1548 return ERR_PTR(-EBUSY);
f99c0f1c 1549
94f6d190
JR
1550 if (domain != NULL)
1551 return domain;
b20ac0d4 1552
15898bbc 1553 /* Device not bound yet - bind it */
94f6d190 1554 dma_dom = find_protection_domain(devid);
15898bbc 1555 if (!dma_dom)
94f6d190
JR
1556 dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
1557 attach_device(dev, &dma_dom->domain);
15898bbc 1558 DUMP_printk("Using protection domain %d for device %s\n",
94f6d190 1559 dma_dom->domain.id, dev_name(dev));
f91ba190 1560
94f6d190 1561 return &dma_dom->domain;
b20ac0d4
JR
1562}
1563
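/* Re-write the device table entries of all devices currently attached to @domain */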
04bfdd84
JR
1564static void update_device_table(struct protection_domain *domain)
1565{
2b681faf 1566 unsigned long flags;
04bfdd84
JR
1567 int i;
1568
1569 for (i = 0; i <= amd_iommu_last_bdf; ++i) {
1570 if (amd_iommu_pd_table[i] != domain)
1571 continue;
2b681faf 1572 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
04bfdd84 1573 set_dte_entry(i, domain);
2b681faf 1574 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
04bfdd84
JR
1575 }
1576}
1577
1578static void update_domain(struct protection_domain *domain)
1579{
1580 if (!domain->updated)
1581 return;
1582
1583 update_device_table(domain);
1584 flush_devices_by_domain(domain);
601367d7 1585 iommu_flush_tlb_pde(domain);
04bfdd84
JR
1586
1587 domain->updated = false;
1588}
1589
8bda3092
JR
1590/*
1591 * This function fetches the PTE for a given address in the aperture
1592 */
1593static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
1594 unsigned long address)
1595{
384de729 1596 struct aperture_range *aperture;
8bda3092
JR
1597 u64 *pte, *pte_page;
1598
384de729
JR
1599 aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
1600 if (!aperture)
1601 return NULL;
1602
1603 pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
8bda3092 1604 if (!pte) {
abdc5eb3
JR
1605 pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page,
1606 GFP_ATOMIC);
384de729
JR
1607 aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
1608 } else
8c8c143c 1609 pte += PM_LEVEL_INDEX(0, address);
8bda3092 1610
04bfdd84 1611 update_domain(&dom->domain);
8bda3092
JR
1612
1613 return pte;
1614}
1615
431b2a20
JR
1616/*
1617 * This is the generic map function. It maps one 4kb page at paddr to
1618 * the given address in the DMA address space for the domain.
1619 */
680525e0 1620static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
cb76c322
JR
1621 unsigned long address,
1622 phys_addr_t paddr,
1623 int direction)
1624{
1625 u64 *pte, __pte;
1626
1627 WARN_ON(address > dom->aperture_size);
1628
1629 paddr &= PAGE_MASK;
1630
8bda3092 1631 pte = dma_ops_get_pte(dom, address);
53812c11 1632 if (!pte)
8fd524b3 1633 return DMA_ERROR_CODE;
cb76c322
JR
1634
1635 __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
1636
1637 if (direction == DMA_TO_DEVICE)
1638 __pte |= IOMMU_PTE_IR;
1639 else if (direction == DMA_FROM_DEVICE)
1640 __pte |= IOMMU_PTE_IW;
1641 else if (direction == DMA_BIDIRECTIONAL)
1642 __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
1643
1644 WARN_ON(*pte);
1645
1646 *pte = __pte;
1647
1648 return (dma_addr_t)address;
1649}
1650
431b2a20
JR
1651/*
 1652 * The generic unmapping function for one page in the DMA address space.
1653 */
680525e0 1654static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
cb76c322
JR
1655 unsigned long address)
1656{
384de729 1657 struct aperture_range *aperture;
cb76c322
JR
1658 u64 *pte;
1659
1660 if (address >= dom->aperture_size)
1661 return;
1662
384de729
JR
1663 aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
1664 if (!aperture)
1665 return;
1666
1667 pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
1668 if (!pte)
1669 return;
cb76c322 1670
8c8c143c 1671 pte += PM_LEVEL_INDEX(0, address);
cb76c322
JR
1672
1673 WARN_ON(!*pte);
1674
1675 *pte = 0ULL;
1676}
1677
431b2a20
JR
1678/*
1679 * This function contains common code for mapping of a physically
24f81160
JR
1680 * contiguous memory region into DMA address space. It is used by all
1681 * mapping functions provided with this IOMMU driver.
431b2a20
JR
1682 * Must be called with the domain lock held.
1683 */
cb76c322 1684static dma_addr_t __map_single(struct device *dev,
cb76c322
JR
1685 struct dma_ops_domain *dma_dom,
1686 phys_addr_t paddr,
1687 size_t size,
6d4f343f 1688 int dir,
832a90c3
JR
1689 bool align,
1690 u64 dma_mask)
cb76c322
JR
1691{
1692 dma_addr_t offset = paddr & ~PAGE_MASK;
53812c11 1693 dma_addr_t address, start, ret;
cb76c322 1694 unsigned int pages;
6d4f343f 1695 unsigned long align_mask = 0;
cb76c322
JR
1696 int i;
1697
e3c449f5 1698 pages = iommu_num_pages(paddr, size, PAGE_SIZE);
cb76c322
JR
1699 paddr &= PAGE_MASK;
1700
8ecaf8f1
JR
1701 INC_STATS_COUNTER(total_map_requests);
1702
c1858976
JR
1703 if (pages > 1)
1704 INC_STATS_COUNTER(cross_page);
1705
6d4f343f
JR
1706 if (align)
1707 align_mask = (1UL << get_order(size)) - 1;
1708
11b83888 1709retry:
832a90c3
JR
1710 address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
1711 dma_mask);
8fd524b3 1712 if (unlikely(address == DMA_ERROR_CODE)) {
11b83888
JR
1713 /*
1714 * setting next_address here will let the address
 1715 * allocator only scan the newly allocated range in the
1716 * first run. This is a small optimization.
1717 */
1718 dma_dom->next_address = dma_dom->aperture_size;
1719
576175c2 1720 if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
11b83888
JR
1721 goto out;
1722
1723 /*
 1724 * aperture was successfully enlarged by 128 MB, try
1725 * allocation again
1726 */
1727 goto retry;
1728 }
cb76c322
JR
1729
1730 start = address;
1731 for (i = 0; i < pages; ++i) {
680525e0 1732 ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
8fd524b3 1733 if (ret == DMA_ERROR_CODE)
53812c11
JR
1734 goto out_unmap;
1735
cb76c322
JR
1736 paddr += PAGE_SIZE;
1737 start += PAGE_SIZE;
1738 }
1739 address += offset;
1740
5774f7c5
JR
1741 ADD_STATS_COUNTER(alloced_io_mem, size);
1742
afa9fdc2 1743 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
dcd1e92e 1744 iommu_flush_tlb(&dma_dom->domain);
1c655773 1745 dma_dom->need_flush = false;
318afd41 1746 } else if (unlikely(amd_iommu_np_cache))
6de8ad9b 1747 iommu_flush_pages(&dma_dom->domain, address, size);
270cab24 1748
cb76c322
JR
1749out:
1750 return address;
53812c11
JR
1751
1752out_unmap:
1753
1754 for (--i; i >= 0; --i) {
1755 start -= PAGE_SIZE;
680525e0 1756 dma_ops_domain_unmap(dma_dom, start);
53812c11
JR
1757 }
1758
1759 dma_ops_free_addresses(dma_dom, address, pages);
1760
8fd524b3 1761 return DMA_ERROR_CODE;
cb76c322
JR
1762}
1763
431b2a20
JR
1764/*
1765 * Does the reverse of the __map_single function. Must be called with
1766 * the domain lock held too
1767 */
cd8c82e8 1768static void __unmap_single(struct dma_ops_domain *dma_dom,
cb76c322
JR
1769 dma_addr_t dma_addr,
1770 size_t size,
1771 int dir)
1772{
1773 dma_addr_t i, start;
1774 unsigned int pages;
1775
8fd524b3 1776 if ((dma_addr == DMA_ERROR_CODE) ||
b8d9905d 1777 (dma_addr + size > dma_dom->aperture_size))
cb76c322
JR
1778 return;
1779
e3c449f5 1780 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
cb76c322
JR
1781 dma_addr &= PAGE_MASK;
1782 start = dma_addr;
1783
1784 for (i = 0; i < pages; ++i) {
680525e0 1785 dma_ops_domain_unmap(dma_dom, start);
cb76c322
JR
1786 start += PAGE_SIZE;
1787 }
1788
5774f7c5
JR
1789 SUB_STATS_COUNTER(alloced_io_mem, size);
1790
cb76c322 1791 dma_ops_free_addresses(dma_dom, dma_addr, pages);
270cab24 1792
80be308d 1793 if (amd_iommu_unmap_flush || dma_dom->need_flush) {
6de8ad9b 1794 iommu_flush_pages(&dma_dom->domain, dma_addr, size);
80be308d
JR
1795 dma_dom->need_flush = false;
1796 }
cb76c322
JR
1797}
1798
431b2a20
JR
1799/*
1800 * The exported map_single function for dma_ops.
1801 */
51491367
FT
1802static dma_addr_t map_page(struct device *dev, struct page *page,
1803 unsigned long offset, size_t size,
1804 enum dma_data_direction dir,
1805 struct dma_attrs *attrs)
4da70b9e
JR
1806{
1807 unsigned long flags;
4da70b9e 1808 struct protection_domain *domain;
4da70b9e 1809 dma_addr_t addr;
832a90c3 1810 u64 dma_mask;
51491367 1811 phys_addr_t paddr = page_to_phys(page) + offset;
4da70b9e 1812
0f2a86f2
JR
1813 INC_STATS_COUNTER(cnt_map_single);
1814
94f6d190
JR
1815 domain = get_domain(dev);
1816 if (PTR_ERR(domain) == -EINVAL)
4da70b9e 1817 return (dma_addr_t)paddr;
94f6d190
JR
1818 else if (IS_ERR(domain))
1819 return DMA_ERROR_CODE;
4da70b9e 1820
f99c0f1c
JR
1821 dma_mask = *dev->dma_mask;
1822
4da70b9e 1823 spin_lock_irqsave(&domain->lock, flags);
94f6d190 1824
cd8c82e8 1825 addr = __map_single(dev, domain->priv, paddr, size, dir, false,
832a90c3 1826 dma_mask);
8fd524b3 1827 if (addr == DMA_ERROR_CODE)
4da70b9e
JR
1828 goto out;
1829
0518a3a4 1830 iommu_flush_complete(domain);
4da70b9e
JR
1831
1832out:
1833 spin_unlock_irqrestore(&domain->lock, flags);
1834
1835 return addr;
1836}
1837
431b2a20
JR
1838/*
1839 * The exported unmap_single function for dma_ops.
1840 */
51491367
FT
1841static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
1842 enum dma_data_direction dir, struct dma_attrs *attrs)
4da70b9e
JR
1843{
1844 unsigned long flags;
4da70b9e 1845 struct protection_domain *domain;
4da70b9e 1846
146a6917
JR
1847 INC_STATS_COUNTER(cnt_unmap_single);
1848
94f6d190
JR
1849 domain = get_domain(dev);
1850 if (IS_ERR(domain))
5b28df6f
JR
1851 return;
1852
4da70b9e
JR
1853 spin_lock_irqsave(&domain->lock, flags);
1854
cd8c82e8 1855 __unmap_single(domain->priv, dma_addr, size, dir);
4da70b9e 1856
0518a3a4 1857 iommu_flush_complete(domain);
4da70b9e
JR
1858
1859 spin_unlock_irqrestore(&domain->lock, flags);
1860}
1861
431b2a20
JR
1862/*
 1863 * This is a special map_sg function which is used if we have to map a
 1864 * device that is not handled by any AMD IOMMU in the system.
1865 */
65b050ad
JR
1866static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
1867 int nelems, int dir)
1868{
1869 struct scatterlist *s;
1870 int i;
1871
1872 for_each_sg(sglist, s, nelems, i) {
1873 s->dma_address = (dma_addr_t)sg_phys(s);
1874 s->dma_length = s->length;
1875 }
1876
1877 return nelems;
1878}
1879
431b2a20
JR
1880/*
1881 * The exported map_sg function for dma_ops (handles scatter-gather
1882 * lists).
1883 */
65b050ad 1884static int map_sg(struct device *dev, struct scatterlist *sglist,
160c1d8e
FT
1885 int nelems, enum dma_data_direction dir,
1886 struct dma_attrs *attrs)
65b050ad
JR
1887{
1888 unsigned long flags;
65b050ad 1889 struct protection_domain *domain;
65b050ad
JR
1890 int i;
1891 struct scatterlist *s;
1892 phys_addr_t paddr;
1893 int mapped_elems = 0;
832a90c3 1894 u64 dma_mask;
65b050ad 1895
d03f067a
JR
1896 INC_STATS_COUNTER(cnt_map_sg);
1897
94f6d190
JR
1898 domain = get_domain(dev);
1899 if (PTR_ERR(domain) == -EINVAL)
f99c0f1c 1900 return map_sg_no_iommu(dev, sglist, nelems, dir);
94f6d190
JR
1901 else if (IS_ERR(domain))
1902 return 0;
dbcc112e 1903
832a90c3 1904 dma_mask = *dev->dma_mask;
65b050ad 1905
65b050ad
JR
1906 spin_lock_irqsave(&domain->lock, flags);
1907
1908 for_each_sg(sglist, s, nelems, i) {
1909 paddr = sg_phys(s);
1910
cd8c82e8 1911 s->dma_address = __map_single(dev, domain->priv,
832a90c3
JR
1912 paddr, s->length, dir, false,
1913 dma_mask);
65b050ad
JR
1914
1915 if (s->dma_address) {
1916 s->dma_length = s->length;
1917 mapped_elems++;
1918 } else
1919 goto unmap;
65b050ad
JR
1920 }
1921
0518a3a4 1922 iommu_flush_complete(domain);
65b050ad
JR
1923
1924out:
1925 spin_unlock_irqrestore(&domain->lock, flags);
1926
1927 return mapped_elems;
1928unmap:
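	/* One element failed to map: undo the mappings done so far. */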
1929 for_each_sg(sglist, s, mapped_elems, i) {
1930 if (s->dma_address)
cd8c82e8 1931 __unmap_single(domain->priv, s->dma_address,
65b050ad
JR
1932 s->dma_length, dir);
1933 s->dma_address = s->dma_length = 0;
1934 }
1935
1936 mapped_elems = 0;
1937
1938 goto out;
1939}
1940
431b2a20
JR
1941/*
 1942 * The exported unmap_sg function for dma_ops (handles scatter-gather
1943 * lists).
1944 */
65b050ad 1945static void unmap_sg(struct device *dev, struct scatterlist *sglist,
160c1d8e
FT
1946 int nelems, enum dma_data_direction dir,
1947 struct dma_attrs *attrs)
65b050ad
JR
1948{
1949 unsigned long flags;
65b050ad
JR
1950 struct protection_domain *domain;
1951 struct scatterlist *s;
65b050ad
JR
1952 int i;
1953
55877a6b
JR
1954 INC_STATS_COUNTER(cnt_unmap_sg);
1955
94f6d190
JR
1956 domain = get_domain(dev);
1957 if (IS_ERR(domain))
5b28df6f
JR
1958 return;
1959
65b050ad
JR
1960 spin_lock_irqsave(&domain->lock, flags);
1961
1962 for_each_sg(sglist, s, nelems, i) {
cd8c82e8 1963 __unmap_single(domain->priv, s->dma_address,
65b050ad 1964 s->dma_length, dir);
65b050ad
JR
1965 s->dma_address = s->dma_length = 0;
1966 }
1967
0518a3a4 1968 iommu_flush_complete(domain);
65b050ad
JR
1969
1970 spin_unlock_irqrestore(&domain->lock, flags);
1971}
1972
431b2a20
JR
1973/*
1974 * The exported alloc_coherent function for dma_ops.
1975 */
5d8b53cf
JR
1976static void *alloc_coherent(struct device *dev, size_t size,
1977 dma_addr_t *dma_addr, gfp_t flag)
1978{
1979 unsigned long flags;
1980 void *virt_addr;
5d8b53cf 1981 struct protection_domain *domain;
5d8b53cf 1982 phys_addr_t paddr;
832a90c3 1983 u64 dma_mask = dev->coherent_dma_mask;
5d8b53cf 1984
c8f0fb36
JR
1985 INC_STATS_COUNTER(cnt_alloc_coherent);
1986
94f6d190
JR
1987 domain = get_domain(dev);
1988 if (PTR_ERR(domain) == -EINVAL) {
f99c0f1c
JR
1989 virt_addr = (void *)__get_free_pages(flag, get_order(size));
1990 *dma_addr = __pa(virt_addr);
1991 return virt_addr;
94f6d190
JR
1992 } else if (IS_ERR(domain))
1993 return NULL;
5d8b53cf 1994
f99c0f1c
JR
1995 dma_mask = dev->coherent_dma_mask;
1996 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
1997 flag |= __GFP_ZERO;
5d8b53cf
JR
1998
1999 virt_addr = (void *)__get_free_pages(flag, get_order(size));
2000 if (!virt_addr)
b25ae679 2001 return NULL;
5d8b53cf 2002
5d8b53cf
JR
2003 paddr = virt_to_phys(virt_addr);
2004
832a90c3
JR
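	/* No coherent DMA mask set - fall back to the streaming DMA mask. */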
2005 if (!dma_mask)
2006 dma_mask = *dev->dma_mask;
2007
5d8b53cf
JR
2008 spin_lock_irqsave(&domain->lock, flags);
2009
cd8c82e8 2010 *dma_addr = __map_single(dev, domain->priv, paddr,
832a90c3 2011 size, DMA_BIDIRECTIONAL, true, dma_mask);
5d8b53cf 2012
8fd524b3 2013 if (*dma_addr == DMA_ERROR_CODE) {
367d04c4 2014 spin_unlock_irqrestore(&domain->lock, flags);
5b28df6f 2015 goto out_free;
367d04c4 2016 }
5d8b53cf 2017
0518a3a4 2018 iommu_flush_complete(domain);
5d8b53cf 2019
5d8b53cf
JR
2020 spin_unlock_irqrestore(&domain->lock, flags);
2021
2022 return virt_addr;
5b28df6f
JR
2023
2024out_free:
2025
2026 free_pages((unsigned long)virt_addr, get_order(size));
2027
2028 return NULL;
5d8b53cf
JR
2029}
2030
431b2a20
JR
2031/*
2032 * The exported free_coherent function for dma_ops.
431b2a20 2033 */
5d8b53cf
JR
2034static void free_coherent(struct device *dev, size_t size,
2035 void *virt_addr, dma_addr_t dma_addr)
2036{
2037 unsigned long flags;
5d8b53cf 2038 struct protection_domain *domain;
5d8b53cf 2039
5d31ee7e
JR
2040 INC_STATS_COUNTER(cnt_free_coherent);
2041
94f6d190
JR
2042 domain = get_domain(dev);
2043 if (IS_ERR(domain))
5b28df6f
JR
2044 goto free_mem;
2045
5d8b53cf
JR
2046 spin_lock_irqsave(&domain->lock, flags);
2047
cd8c82e8 2048 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
5d8b53cf 2049
0518a3a4 2050 iommu_flush_complete(domain);
5d8b53cf
JR
2051
2052 spin_unlock_irqrestore(&domain->lock, flags);
2053
2054free_mem:
2055 free_pages((unsigned long)virt_addr, get_order(size));
2056}
2057
b39ba6ad
JR
2058/*
2059 * This function is called by the DMA layer to find out if we can handle a
2060 * particular device. It is part of the dma_ops.
2061 */
2062static int amd_iommu_dma_supported(struct device *dev, u64 mask)
2063{
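	/*
	 * The mask itself is not checked here; __map_single honours the
	 * device's DMA mask when it allocates IO virtual addresses.
	 */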
420aef8a 2064 return check_device(dev);
b39ba6ad
JR
2065}
2066
c432f3df 2067/*
431b2a20
JR
2068 * The function for pre-allocating protection domains.
2069 *
c432f3df
JR
 2070 * Once the driver core informs the DMA layer when a driver grabs a
 2071 * device, we will no longer need to preallocate the protection domains.
 2072 * For now we have to.
2073 */
0e93dd88 2074static void prealloc_protection_domains(void)
c432f3df
JR
2075{
2076 struct pci_dev *dev = NULL;
2077 struct dma_ops_domain *dma_dom;
98fc5a69 2078 u16 devid;
c432f3df
JR
2079
2080 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
98fc5a69
JR
2081
2082 /* Do we handle this device? */
2083 if (!check_device(&dev->dev))
c432f3df 2084 continue;
98fc5a69
JR
2085
2086 /* Is there already any domain for it? */
15898bbc 2087 if (domain_for_device(&dev->dev))
c432f3df 2088 continue;
98fc5a69
JR
2089
2090 devid = get_device_id(&dev->dev);
2091
87a64d52 2092 dma_dom = dma_ops_domain_alloc();
c432f3df
JR
2093 if (!dma_dom)
2094 continue;
2095 init_unity_mappings_for_device(dma_dom, devid);
bd60b735
JR
2096 dma_dom->target_dev = devid;
2097
15898bbc 2098 attach_device(&dev->dev, &dma_dom->domain);
be831297 2099
bd60b735 2100 list_add_tail(&dma_dom->list, &iommu_pd_list);
c432f3df
JR
2101 }
2102}
2103
160c1d8e 2104static struct dma_map_ops amd_iommu_dma_ops = {
6631ee9d
JR
2105 .alloc_coherent = alloc_coherent,
2106 .free_coherent = free_coherent,
51491367
FT
2107 .map_page = map_page,
2108 .unmap_page = unmap_page,
6631ee9d
JR
2109 .map_sg = map_sg,
2110 .unmap_sg = unmap_sg,
b39ba6ad 2111 .dma_supported = amd_iommu_dma_supported,
6631ee9d
JR
2112};
2113
431b2a20
JR
2114/*
 2115 * The function which hooks the AMD IOMMU driver into dma_ops.
2116 */
6631ee9d
JR
2117int __init amd_iommu_init_dma_ops(void)
2118{
2119 struct amd_iommu *iommu;
6631ee9d
JR
2120 int ret;
2121
431b2a20
JR
2122 /*
2123 * first allocate a default protection domain for every IOMMU we
2124 * found in the system. Devices not assigned to any other
2125 * protection domain will be assigned to the default one.
2126 */
3bd22172 2127 for_each_iommu(iommu) {
87a64d52 2128 iommu->default_dom = dma_ops_domain_alloc();
6631ee9d
JR
2129 if (iommu->default_dom == NULL)
2130 return -ENOMEM;
e2dc14a2 2131 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
6631ee9d
JR
2132 ret = iommu_init_unity_mappings(iommu);
2133 if (ret)
2134 goto free_domains;
2135 }
2136
431b2a20
JR
2137 /*
2138 * If device isolation is enabled, pre-allocate the protection
2139 * domains for each device.
2140 */
6631ee9d
JR
2141 if (amd_iommu_isolate)
2142 prealloc_protection_domains();
2143
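	/*
	 * The hardware IOMMU takes over DMA remapping, so neither the
	 * swiotlb bounce buffers nor the GART aperture are needed anymore.
	 */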
2144 iommu_detected = 1;
75f1cdf1 2145 swiotlb = 0;
92af4e29 2146#ifdef CONFIG_GART_IOMMU
6631ee9d
JR
2147 gart_iommu_aperture_disabled = 1;
2148 gart_iommu_aperture = 0;
92af4e29 2149#endif
6631ee9d 2150
431b2a20 2151 /* Finally make this driver's dma_ops visible to the device drivers */
6631ee9d
JR
2152 dma_ops = &amd_iommu_dma_ops;
2153
26961efe 2154 register_iommu(&amd_iommu_ops);
26961efe 2155
e275a2a0
JR
2156 bus_register_notifier(&pci_bus_type, &device_nb);
2157
7f26508b
JR
2158 amd_iommu_stats_init();
2159
6631ee9d
JR
2160 return 0;
2161
2162free_domains:
2163
3bd22172 2164 for_each_iommu(iommu) {
6631ee9d
JR
2165 if (iommu->default_dom)
2166 dma_ops_domain_free(iommu->default_dom);
2167 }
2168
2169 return ret;
2170}
6d98cd80
JR
2171
2172/*****************************************************************************
2173 *
2174 * The following functions belong to the exported interface of AMD IOMMU
2175 *
2176 * This interface allows access to lower level functions of the IOMMU
 2177 * like protection domain handling and assignment of devices to domains
2178 * which is not possible with the dma_ops interface.
2179 *
2180 *****************************************************************************/
2181
6d98cd80
JR
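/*
 * Remove all device table entries that still point to this domain. Needed
 * when the domain is destroyed while devices are still attached to it.
 */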
2182static void cleanup_domain(struct protection_domain *domain)
2183{
2184 unsigned long flags;
2185 u16 devid;
2186
2187 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2188
2189 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
2190 if (amd_iommu_pd_table[devid] == domain)
15898bbc 2191 clear_dte_entry(devid);
6d98cd80
JR
2192
2193 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2194}
2195
2650815f
JR
2196static void protection_domain_free(struct protection_domain *domain)
2197{
2198 if (!domain)
2199 return;
2200
aeb26f55
JR
2201 del_domain_from_list(domain);
2202
2650815f
JR
2203 if (domain->id)
2204 domain_id_free(domain->id);
2205
2206 kfree(domain);
2207}
2208
2209static struct protection_domain *protection_domain_alloc(void)
c156e347
JR
2210{
2211 struct protection_domain *domain;
2212
2213 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2214 if (!domain)
2650815f 2215 return NULL;
c156e347
JR
2216
2217 spin_lock_init(&domain->lock);
c156e347
JR
2218 domain->id = domain_id_alloc();
2219 if (!domain->id)
2650815f
JR
2220 goto out_err;
2221
aeb26f55
JR
2222 add_domain_to_list(domain);
2223
2650815f
JR
2224 return domain;
2225
2226out_err:
2227 kfree(domain);
2228
2229 return NULL;
2230}
2231
2232static int amd_iommu_domain_init(struct iommu_domain *dom)
2233{
2234 struct protection_domain *domain;
2235
2236 domain = protection_domain_alloc();
2237 if (!domain)
c156e347 2238 goto out_free;
2650815f
JR
2239
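	/*
	 * New domains start with a 3-level page table which covers a 39-bit
	 * IO virtual address space and can be grown when larger IO virtual
	 * addresses get mapped.
	 */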
2240 domain->mode = PAGE_MODE_3_LEVEL;
c156e347
JR
2241 domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2242 if (!domain->pt_root)
2243 goto out_free;
2244
2245 dom->priv = domain;
2246
2247 return 0;
2248
2249out_free:
2650815f 2250 protection_domain_free(domain);
c156e347
JR
2251
2252 return -ENOMEM;
2253}
2254
98383fc3
JR
2255static void amd_iommu_domain_destroy(struct iommu_domain *dom)
2256{
2257 struct protection_domain *domain = dom->priv;
2258
2259 if (!domain)
2260 return;
2261
2262 if (domain->dev_cnt > 0)
2263 cleanup_domain(domain);
2264
2265 BUG_ON(domain->dev_cnt != 0);
2266
2267 free_pagetable(domain);
2268
2269 domain_id_free(domain->id);
2270
2271 kfree(domain);
2272
2273 dom->priv = NULL;
2274}
2275
684f2888
JR
2276static void amd_iommu_detach_device(struct iommu_domain *dom,
2277 struct device *dev)
2278{
684f2888 2279 struct amd_iommu *iommu;
684f2888
JR
2280 u16 devid;
2281
98fc5a69 2282 if (!check_device(dev))
684f2888
JR
2283 return;
2284
98fc5a69 2285 devid = get_device_id(dev);
684f2888 2286
98fc5a69 2287 if (amd_iommu_pd_table[devid] != NULL)
15898bbc 2288 detach_device(dev);
684f2888
JR
2289
2290 iommu = amd_iommu_rlookup_table[devid];
2291 if (!iommu)
2292 return;
2293
2294 iommu_queue_inv_dev_entry(iommu, devid);
2295 iommu_completion_wait(iommu);
2296}
2297
01106066
JR
2298static int amd_iommu_attach_device(struct iommu_domain *dom,
2299 struct device *dev)
2300{
2301 struct protection_domain *domain = dom->priv;
2302 struct protection_domain *old_domain;
2303 struct amd_iommu *iommu;
15898bbc 2304 int ret;
01106066
JR
2305 u16 devid;
2306
98fc5a69 2307 if (!check_device(dev))
01106066
JR
2308 return -EINVAL;
2309
98fc5a69 2310 devid = get_device_id(dev);
01106066
JR
2311
2312 iommu = amd_iommu_rlookup_table[devid];
2313 if (!iommu)
2314 return -EINVAL;
2315
15898bbc 2316 old_domain = amd_iommu_pd_table[devid];
01106066 2317 if (old_domain)
15898bbc 2318 detach_device(dev);
01106066 2319
15898bbc 2320 ret = attach_device(dev, domain);
01106066
JR
2321
2322 iommu_completion_wait(iommu);
2323
15898bbc 2324 return ret;
01106066
JR
2325}
2326
c6229ca6
JR
2327static int amd_iommu_map_range(struct iommu_domain *dom,
2328 unsigned long iova, phys_addr_t paddr,
2329 size_t size, int iommu_prot)
2330{
2331 struct protection_domain *domain = dom->priv;
2332 unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
2333 int prot = 0;
2334 int ret;
2335
2336 if (iommu_prot & IOMMU_READ)
2337 prot |= IOMMU_PROT_IR;
2338 if (iommu_prot & IOMMU_WRITE)
2339 prot |= IOMMU_PROT_IW;
2340
2341 iova &= PAGE_MASK;
2342 paddr &= PAGE_MASK;
2343
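	/*
	 * The range is mapped as individual 4k pages. Pages mapped before a
	 * failure are left in place; the caller is expected to clean up.
	 */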
2344 for (i = 0; i < npages; ++i) {
abdc5eb3 2345 ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
c6229ca6
JR
2346 if (ret)
2347 return ret;
2348
2349 iova += PAGE_SIZE;
2350 paddr += PAGE_SIZE;
2351 }
2352
2353 return 0;
2354}
2355
eb74ff6c
JR
2356static void amd_iommu_unmap_range(struct iommu_domain *dom,
2357 unsigned long iova, size_t size)
2358{
2359
2360 struct protection_domain *domain = dom->priv;
2361 unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);
2362
2363 iova &= PAGE_MASK;
2364
2365 for (i = 0; i < npages; ++i) {
a6b256b4 2366 iommu_unmap_page(domain, iova, PM_MAP_4k);
eb74ff6c
JR
2367 iova += PAGE_SIZE;
2368 }
2369
601367d7 2370 iommu_flush_tlb_pde(domain);
eb74ff6c
JR
2371}
2372
645c4c8d
JR
2373static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
2374 unsigned long iova)
2375{
2376 struct protection_domain *domain = dom->priv;
2377 unsigned long offset = iova & ~PAGE_MASK;
2378 phys_addr_t paddr;
2379 u64 *pte;
2380
a6b256b4 2381 pte = fetch_pte(domain, iova, PM_MAP_4k);
645c4c8d 2382
a6d41a40 2383 if (!pte || !IOMMU_PTE_PRESENT(*pte))
645c4c8d
JR
2384 return 0;
2385
2386 paddr = *pte & IOMMU_PAGE_MASK;
2387 paddr |= offset;
2388
2389 return paddr;
2390}
2391
dbb9fd86
SY
2392static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
2393 unsigned long cap)
2394{
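	/* No capabilities (e.g. IOMMU_CAP_CACHE_COHERENCY) are reported yet. */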
2395 return 0;
2396}
2397
26961efe
JR
2398static struct iommu_ops amd_iommu_ops = {
2399 .domain_init = amd_iommu_domain_init,
2400 .domain_destroy = amd_iommu_domain_destroy,
2401 .attach_dev = amd_iommu_attach_device,
2402 .detach_dev = amd_iommu_detach_device,
2403 .map = amd_iommu_map_range,
2404 .unmap = amd_iommu_unmap_range,
2405 .iova_to_phys = amd_iommu_iova_to_phys,
dbb9fd86 2406 .domain_has_cap = amd_iommu_domain_has_cap,
26961efe
JR
2407};
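
/*
 * Rough usage sketch for this interface, assuming the generic IOMMU API
 * wrappers from linux/iommu.h (as used for example by KVM device
 * assignment):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map_range(dom, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap_range(dom, iova, size);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */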
2408
0feae533
JR
2409/*****************************************************************************
2410 *
2411 * The next functions do a basic initialization of IOMMU for pass through
2412 * mode
2413 *
2414 * In passthrough mode the IOMMU is initialized and enabled but not used for
2415 * DMA-API translation.
2416 *
2417 *****************************************************************************/
2418
2419int __init amd_iommu_init_passthrough(void)
2420{
15898bbc 2421 struct amd_iommu *iommu;
0feae533 2422 struct pci_dev *dev = NULL;
15898bbc 2423 u16 devid;
0feae533
JR
2424
 2425 /* allocate passthrough domain */
2426 pt_domain = protection_domain_alloc();
2427 if (!pt_domain)
2428 return -ENOMEM;
2429
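	/*
	 * PAGE_MODE_NONE: the domain has no page table, so DMA from attached
	 * devices passes through untranslated.
	 */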
2430 pt_domain->mode |= PAGE_MODE_NONE;
2431
2432 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
0feae533 2433
98fc5a69 2434 if (!check_device(&dev->dev))
0feae533
JR
2435 continue;
2436
98fc5a69
JR
2437 devid = get_device_id(&dev->dev);
2438
15898bbc 2439 iommu = amd_iommu_rlookup_table[devid];
0feae533
JR
2440 if (!iommu)
2441 continue;
2442
15898bbc 2443 attach_device(&dev->dev, pt_domain);
0feae533
JR
2444 }
2445
2446 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
2447
2448 return 0;
2449}