arch/powerpc/kernel/iommu.c

/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *	       and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

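/*
 * Example: virtual merging is selected on the kernel command line,
 * e.g. "iommu=novmerge" forces one DMA segment per scatterlist
 * element, while "iommu=vmerge" (the default) allows adjacent
 * mappings to be coalesced into larger segments.
 */
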
static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. Without it,
 * on a POWER7 with 4-way SMT, where interrupts land on the primary
 * threads, all primary threads would map to the same pool when there
 * are 4 pools.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);

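/*
 * Example: without the hash, pool selection would reduce to
 * "cpu & (nr_pools - 1)". On an SMT4 system the primary threads are
 * CPUs 0, 4, 8, 12, ..., so with 4 pools they would all pick pool 0.
 * hash_32(cpu, IOMMU_POOL_HASHBITS) decorrelates the low bits, e.g.
 * (values illustrative, not real hash output):
 *
 *	cpu 0 -> pool 2, cpu 4 -> pool 1, cpu 8 -> pool 3, ...
 */
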
#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
		   fail_iommu_store);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but
 * before devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif

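/*
 * Example usage of the fault injection hooks above (paths assume the
 * standard fault-injection debugfs layout):
 *
 *	# opt a single device in via its sysfs attribute
 *	echo 1 > /sys/bus/pci/devices/0000:00:01.0/fail_iommu
 *	# then configure the shared fault attributes
 *	echo 10 > /sys/kernel/debug/fail_iommu/probability
 *	echo -1 > /sys/kernel/debug/fail_iommu/times
 */
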
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (should_fail_iommu(dev))
		return DMA_ERROR_CODE;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end
	 * of the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << tbl->it_page_shift);
	else
		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     boundary_size >> tbl->it_page_shift, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
				~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}

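/*
 * Worked example of the small-allocation hint rounding above, assuming
 * it_blocksize = 16 entries: an allocation ending at entry 37 leaves
 * pool->hint = (37 + 15) & ~15 = 48, so the next small allocation
 * starts on a fresh block boundary rather than packing in behind it.
 */
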
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}

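/*
 * Example of the virtual merging done above (entries illustrative):
 * two 4K scatterlist elements that happen to receive back-to-back
 * IOMMU entries, say bus addresses 0x1000 and 0x2000, are reported
 * to the driver as a single 8K segment at 0x1000, provided vmerge is
 * enabled and dma_get_max_seg_size(dev) is not exceeded.
 */
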
void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			struct dma_attrs *attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In the case of firmware-assisted dump, the system goes through a
	 * clean reboot at the time of the crash, so it is safe to clear
	 * the TCE entries when firmware-assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure. This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * and would crash the machine or even lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

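/*
 * Worked example of the pool carve-up above: a 2GB window of 64K pages
 * gives it_size = 32768 entries. That is >= 1GB, so assuming
 * IOMMU_NR_POOLS == 4, poolsize = (32768 * 3 / 4) / 4 = 6144, i.e.
 * four small pools covering entries 0-24575 and the large pool
 * covering the top quarter, entries 24576-32767.
 */
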
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz;
	unsigned int order;

	if (!tbl)
		return;

	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	/*
	 * In case we have reserved the first bit, we should not emit
	 * the warning below.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);

	/* calculate bitmap size in bytes */
	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));

	if (tbl) {
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space. This is checked later
	 * anyway. It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> tbl->it_page_shift;
	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}

unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return TCE_PCI_READ | TCE_PCI_WRITE;
	case DMA_FROM_DEVICE:
		return TCE_PCI_WRITE;
	case DMA_TO_DEVICE:
		return TCE_PCI_READ;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);

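/*
 * Note the device-centric sense of the flags above: DMA_TO_DEVICE
 * means the device reads memory, hence TCE_PCI_READ; DMA_FROM_DEVICE
 * means the device writes memory, hence TCE_PCI_WRITE. For example:
 *
 *	iommu_direction_to_tce_perm(DMA_TO_DEVICE) == TCE_PCI_READ
 *	iommu_tce_direction(TCE_PCI_READ) == DMA_TO_DEVICE
 */
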
#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
				PTR_ERR(grp));
		return;
	}
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

int iommu_tce_clear_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce_value,
		unsigned long npages)
{
	/* tbl->it_ops->clear() does not support any value but 0 */
	if (tce_value)
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;

	ioba >>= tbl->it_page_shift;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);

int iommu_tce_put_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce)
{
	if (tce & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;

	ioba >>= tbl->it_page_shift;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);

long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	/* if (unlikely(ret))
		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
				__func__, hwaddr, entry << tbl->it_page_shift,
				hwaddr, ret); */

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);

int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
	int ret = 0;

	/*
	 * VFIO does not control TCE entries allocation and the guest
	 * can write new TCEs on top of existing ones so iommu_tce_build()
	 * must be able to release old pages. This functionality
	 * requires exchange() callback defined so if it is not
	 * implemented, we disallow taking ownership over the table.
	 */
	if (!tbl->it_ops->exchange)
		return -EINVAL;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		pr_err("iommu_tce: it_map is not empty");
		ret = -EBUSY;
		/* Restore bit#0 set by iommu_init_table() */
		if (tbl->it_offset == 0)
			set_bit(0, tbl->it_map);
	} else {
		memset(tbl->it_map, 0xff, sz);
	}

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	memset(tbl->it_map, 0, sz);

	/* Restore bit#0 set by iommu_init_table() */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

int iommu_add_device(struct device *dev)
{
	struct iommu_table *tbl;
	struct iommu_table_group_link *tgl;

	/*
	 * The sysfs entries should be populated before
	 * binding IOMMU group. If the sysfs entries aren't
	 * ready, we simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (dev->iommu_group) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	tbl = get_iommu_table_base(dev);
	if (!tbl) {
		pr_debug("%s: Skipping device %s with no tbl\n",
			 __func__, dev_name(dev));
		return 0;
	}

	tgl = list_first_entry_or_null(&tbl->it_group_list,
				       struct iommu_table_group_link, next);
	if (!tgl) {
		pr_debug("%s: Skipping device %s with no group\n",
			 __func__, dev_name(dev));
		return 0;
	}
	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev),
		 iommu_group_id(tgl->table_group->group));

	if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
		pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
		       __func__, IOMMU_PAGE_SIZE(tbl),
		       PAGE_SIZE, dev_name(dev));
		return -EINVAL;
	}

	return iommu_group_add_device(tgl->table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
	/*
	 * Some devices might not have an IOMMU table and group,
	 * and we needn't detach them from the associated
	 * IOMMU groups.
	 */
	if (!dev->iommu_group) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
			 dev_name(dev));
		return;
	}

	iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);

static int tce_iommu_bus_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		return iommu_add_device(dev);
	case BUS_NOTIFY_DEL_DEVICE:
		if (dev->iommu_group)
			iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block tce_iommu_bus_nb = {
	.notifier_call = tce_iommu_bus_notifier,
};

int __init tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
	return 0;
}
#endif /* CONFIG_IOMMU_API */