/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *		 and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/iommu-helper.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)
#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;
static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
static inline unsigned long iommu_num_pages(unsigned long vaddr,
					    unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;

	return npages;
}
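/*
 * Example for iommu_num_pages() above, assuming a 4K IOMMU page
 * (IOMMU_PAGE_SHIFT == 12): a 0x100-byte buffer at vaddr 0x1ff8 straddles
 * a page boundary, so IOMMU_PAGE_ALIGN(0x20f8) - 0x1000 = 0x2000 and the
 * helper returns 2, even though the length alone would fit in one page.
 */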
static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}
static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}
__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);
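/*
 * Command-line control: "protect4gb=on|off" and "iommu=vmerge|novmerge"
 * simply flip the protect4gb and novmerge flags above; no other state is
 * touched at parse time.
 */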
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */
	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;
	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:
	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}
	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}
	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		               ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
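/*
 * Allocation policy notes for iommu_range_alloc() above: the table is
 * split at it_halfpoint so that small (<= 15 page) and large requests
 * come from different regions, limiting fragmentation, and it_hint /
 * it_largehint turn the bitmap scan into a next-fit search. Up to three
 * passes are made before giving up.
 */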
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	spin_lock_irqsave(&(tbl->it_lock), flags);
	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */
	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
				      (unsigned long)page & IOMMU_PAGE_MASK,
				      direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);

		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}
	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
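/*
 * iommu_alloc() above is the common back end for the single and coherent
 * mapping paths: under it_lock it reserves bitmap space, programs the
 * hardware TCEs through ppc_md.tce_build(), and returns the resulting
 * bus address (or DMA_ERROR_CODE).
 */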
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;
	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);
	iommu_area_free(tbl->it_map, free_entry, npages);
}
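/*
 * The bounds check in __iommu_free() rejects a dma_addr below the table's
 * it_offset window as well as a range running past it_size; such a free
 * is logged (rate-limited) and dropped instead of being allowed to
 * corrupt the allocation bitmap.
 */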
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
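/*
 * iommu_free() is the locked wrapper around __iommu_free(): it takes
 * it_lock and flushes the TCE cache once per call. The iommu_map_sg()
 * failure path below calls __iommu_free() directly since it already
 * holds the lock.
 */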
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);
	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}
		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK,
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;
		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG(" - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG(" can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG(" merged, new len: %ux\n", outs->dma_length);
			}
		}
		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG(" - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG(" - dma next is: %lx\n", dma_next);
	}
	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}
	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
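/*
 * Unless novmerge is set, iommu_map_sg() above coalesces scatterlist
 * entries whose IOMMU-side addresses turn out contiguous (subject to the
 * device's max segment size), so it can return fewer segments than it was
 * given; the return value is the merged segment count.
 */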
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);
#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index;
		unsigned long tceval;
		unsigned long tcecount = 0;

		/*
		 * Reserve the existing mappings left by the first kernel.
		 */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}
		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; ");
			printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#else
	/* Clear the hardware table in case firmware left allocations in it */
	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
			    void *vaddr, size_t size, unsigned long mask,
			    enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}
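/*
 * Typical use from a driver (hypothetical sketch; "buf", "len" and
 * "dma_mask" are illustrative, not part of this API):
 *
 *	dma_addr_t dma = iommu_map_single(dev, tbl, buf, len, dma_mask,
 *					  DMA_TO_DEVICE, NULL);
 *	if (dma == DMA_ERROR_CODE)
 *		return -EIO;
 *	... program the device with dma, wait for completion, then ...
 *	iommu_unmap_single(tbl, dma, len, DMA_TO_DEVICE, NULL);
 */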
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
	}
}
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}
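/*
 * Note that iommu_alloc_coherent() requests io_order alignment for the
 * IOMMU-side mapping (derived from the full page-aligned size), so the
 * returned bus address is naturally aligned to the allocation, presumably
 * mirroring the alignment alloc_pages_node() gives on the CPU side.
 */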
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
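/*
 * iommu_free_coherent() must be given the same size as the matching
 * iommu_alloc_coherent() call: both the number of TCEs released and the
 * page order handed back to free_pages() are recomputed from it.
 */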