/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);
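
/*
 * Virtual merging is on by default; booting with "iommu=novmerge" on the
 * kernel command line forces every scatterlist element into its own DMA
 * segment, while "iommu=vmerge" re-enables merging explicitly.
 */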

static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);
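	/*
	 * e.g. align_order == 2 gives align_mask == 0x3, forcing the
	 * allocation to start on a 4-IOMMU-page boundary.  (align_order == 0
	 * makes this a shift by 64, formally undefined in C; the powerpc64
	 * shift instructions in practice yield 0, i.e. no constraint.)
	 */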

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

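	/*
	 * iommu_area_alloc() (from linux/iommu-helper.h) searches the bitmap
	 * for a clear run of npages bits in [start, limit), honouring
	 * align_mask and rejecting candidates that would cross a
	 * boundary_size boundary (passed below in IOMMU pages).
	 */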
	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
			       ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
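
/*
 * iommu_alloc() couples the bitmap allocator above with the platform's TCE
 * hooks: reserve a range, point the hardware entries at the buffer via
 * ppc_md.tce_build(), and back the range out again if the build step
 * reports a transient failure.
 */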
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	spin_lock_irqsave(&(tbl->it_lock), flags);
	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
				      (unsigned long)page & IOMMU_PAGE_MASK,
				      direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		spin_lock_irqsave(&(tbl->it_lock), flags);
		__iommu_free(tbl, ret, npages);
		spin_unlock_irqrestore(&(tbl->it_lock), flags);

		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);
	bitmap_clear(tbl->it_map, free_entry, npages);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);
	__iommu_free(tbl, dma_addr, npages);
	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}
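
/*
 * iommu_map_sg() walks the scatterlist once, allocating IOMMU space for
 * each element as it goes.  Whenever a new allocation lands immediately
 * after the previous one in DMA space (dma_addr == dma_next), the two
 * elements are merged into a single outgoing segment, subject to the
 * device's max_seg_size and to the "iommu=novmerge" override.
 */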
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
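		/*
		 * With 64K kernel pages over a 4K IOMMU page size, a
		 * page-aligned buffer of at least a page requests enough
		 * extra alignment (align_order 4 for 64K/4K) that its DMA
		 * address comes out 64K-aligned as well, which callers tend
		 * to expect of page-aligned buffers.
		 */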
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK,
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG(" - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG(" can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG(" merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG(" - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG(" - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
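
/*
 * iommu_unmap_sg() relies on the terminator written above: it walks at
 * most nelems entries but stops at the first one with dma_length == 0,
 * which iommu_map_sg() set whenever merging produced fewer segments than
 * it was handed.
 */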
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		    int nelems, enum dma_data_direction direction,
		    struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
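
/*
 * In a kdump kernel the TCE table may still carry live mappings set up by
 * the crashed kernel, so it is scrubbed below only when that is known to
 * be safe: a regular boot, or a firmware-assisted dump, which goes
 * through a clean reboot first.
 */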
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * With firmware-assisted dump, the system goes through a clean
	 * reboot at the time of the crash, so it is safe to clear the
	 * TCE entries when fadump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * A freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;
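	/*
	 * e.g. a 1GB DMA window of 4K IOMMU pages gives it_size == 0x40000
	 * entries and a 32KB bitmap (an order-3 allocation with 4K kernel
	 * pages).
	 */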

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This protects against buggy drivers that treat page 0 as
	 * invalid, which could otherwise crash the machine or even
	 * lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
		       node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
			       __func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
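/*
 * A typical caller is the platform's dma_map_ops ->map_page hook; a
 * sketch, assuming get_iommu_table_base()/device_to_mask()-style helpers:
 *
 *	return iommu_map_page(dev, get_iommu_table_base(dev), page,
 *			      offset, size, device_to_mask(dev),
 *			      direction, attrs);
 */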
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space. This is checked later
	 * anyway. It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
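	/*
	 * e.g. a 16K request with 4K IOMMU pages gives nio_pages == 4 and
	 * io_order == 2, so the mapping starts on a four-page boundary and
	 * the returned DMA address stays 16K-aligned.
	 */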
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}