[SPARC64]: Fix cpu trampoline et al. mismatch warnings.
arch/sparc64/kernel/iommu.c
/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)     \
        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
        (*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
        (*((STC)->strbuf_flushflag) != 0UL)

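/* Register access helpers.  IOMMU and streaming-buffer control
 * registers are accessed by physical address through the
 * ASI_PHYS_BYPASS_EC_E address space identifier, bypassing the MMU.
 */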
#define iommu_read(__reg) \
({      u64 __ret; \
        __asm__ __volatile__("ldxa [%1] %2, %0" \
                             : "=r" (__ret) \
                             : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory"); \
        __ret; \
})
#define iommu_write(__reg, __val) \
        __asm__ __volatile__("stxa %0, [%1] %2" \
                             : /* no outputs */ \
                             : "r" (__val), "r" (__reg), \
                               "i" (ASI_PHYS_BYPASS_EC_E))

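/* Flush the entire IOMMU TLB.  Newer hardware exposes a single
 * flush-invalidate register; otherwise we write zeros to what appear
 * to be 16 diagnostic tag entries spaced eight bytes apart, then read
 * the write-complete register so the PIO stores are known to have
 * reached the hardware before we proceed.
 */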
/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
        if (iommu->iommu_flushinv) {
                iommu_write(iommu->iommu_flushinv, ~(u64)0);
        } else {
                unsigned long tag;
                int entry;

                tag = iommu->iommu_tags;
                for (entry = 0; entry < 16; entry++) {
                        iommu_write(tag, 0);
                        tag += 8;
                }

                /* Ensure completion of previous PIO writes. */
                (void) iommu_read(iommu->write_complete_reg);
        }
}

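/* IOPTE protection bits.  The context number, if any, occupies bit 47
 * and up of the IOPTE; streaming mappings additionally set IOPTE_STBUF
 * so their traffic passes through the streaming cache.
 */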
#define IOPTE_CONSISTENT(CTX) \
        (IOPTE_VALID | IOPTE_CACHE | \
         (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
        (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)    \
        ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
        unsigned long val = iopte_val(*iopte);

        val &= ~IOPTE_PAGE;
        val |= iommu->dummy_page_pa;

        iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
                                struct iommu *iommu,
                                unsigned long npages,
                                unsigned long *handle)
{
        unsigned long n, end, start, limit, boundary_size;
        struct iommu_arena *arena = &iommu->arena;
        int pass = 0;

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        if (handle && *handle)
                start = *handle;
        else
                start = arena->hint;

        limit = arena->limit;

        /* The case below can happen if we have a small segment appended
         * to a large, or when the previous alloc was at the very end of
         * the available space.  If so, go back to the beginning and flush.
         */
        if (start >= limit) {
                start = 0;
                if (iommu->flush_all)
                        iommu->flush_all(iommu);
        }

again:

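        /* Honor the segment boundary restriction: an allocated range
         * must not cross a boundary of dma_get_seg_boundary(dev) + 1
         * bytes (4GB is assumed below when no device is supplied).
         */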
        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << IO_PAGE_SHIFT);
        else
                boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

        n = iommu_area_alloc(arena->map, limit, start, npages, 0,
                             boundary_size >> IO_PAGE_SHIFT, 0);
        if (n == -1) {
                if (likely(pass < 1)) {
                        /* First failure, rescan from the beginning.  */
                        start = 0;
                        if (iommu->flush_all)
                                iommu->flush_all(iommu);
                        pass++;
                        goto again;
                } else {
                        /* Second failure, give up */
                        return DMA_ERROR_CODE;
                }
        }

        end = n + npages;

        arena->hint = end;

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        return n;
}

void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long entry;

        entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

        iommu_area_free(arena->map, entry, npages);
}

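/* One-time setup of an IOMMU instance: size the allocation bitmap at
 * one bit per TSB entry (rounded up to a multiple of eight bytes),
 * allocate the dummy page that inactive IOPTEs point at, then allocate
 * the TSB (the IOMMU page table) itself.  Only sun4u-style hardware
 * needs an explicit flush on allocator wrap, so flush_all stays NULL
 * under the hypervisor.
 */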
int iommu_table_init(struct iommu *iommu, int tsbsize,
                     u32 dma_offset, u32 dma_addr_mask)
{
        unsigned long i, tsbbase, order, sz, num_tsb_entries;

        num_tsb_entries = tsbsize / sizeof(iopte_t);

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_addr_mask;

        /* Allocate and initialize the free area map. */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                printk(KERN_ERR "IOMMU: Error, kzalloc(arena.map) failed.\n");
                return -ENOMEM;
        }
        iommu->arena.limit = num_tsb_entries;

        if (tlb_type != hypervisor)
                iommu->flush_all = iommu_flushall;

        /* Allocate and initialize the dummy page which we
         * set inactive IO PTEs to point to.
         */
        iommu->dummy_page = get_zeroed_page(GFP_KERNEL);
        if (!iommu->dummy_page) {
                printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
                goto out_free_map;
        }
        iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

        /* Now allocate and setup the IOMMU page table itself. */
        order = get_order(tsbsize);
        tsbbase = __get_free_pages(GFP_KERNEL, order);
        if (!tsbbase) {
                printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
                goto out_free_dummy_page;
        }
        iommu->page_table = (iopte_t *)tsbbase;

        for (i = 0; i < num_tsb_entries; i++)
                iopte_make_dummy(iommu, &iommu->page_table[i]);

        return 0;

out_free_dummy_page:
        free_page(iommu->dummy_page);
        iommu->dummy_page = 0UL;

out_free_map:
        kfree(iommu->arena.map);
        iommu->arena.map = NULL;

        return -ENOMEM;
}

static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
                                    unsigned long npages)
{
        unsigned long entry;

        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        if (unlikely(entry == DMA_ERROR_CODE))
                return NULL;

        return iommu->page_table + entry;
}

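/* DMA context allocation.  Context 0 means "no context" and is never
 * handed out; the search starts at the lowest recently-freed context
 * number and wraps around once before giving up and falling back to 0.
 */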
static int iommu_alloc_ctx(struct iommu *iommu)
{
        int lowest = iommu->ctx_lowest_free;
        int sz = IOMMU_NUM_CTXS - lowest;
        int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

        if (unlikely(n == sz)) {
                n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
                if (unlikely(n == lowest)) {
                        printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
                        n = 0;
                }
        }
        if (n)
                __set_bit(n, iommu->ctx_bitmap);

        return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
        if (likely(ctx)) {
                __clear_bit(ctx, iommu->ctx_bitmap);
                if (ctx < iommu->ctx_lowest_free)
                        iommu->ctx_lowest_free = ctx;
        }
}

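/* Allocate a physically-contiguous block, zero it, and map it
 * consistent (no streaming cache).  The order >= 10 check simply
 * refuses unreasonably large requests before touching the page
 * allocator.
 */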
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp)
{
        struct iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, first_page;
        void *ret;
        int npages;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= 10)
                return NULL;

        first_page = __get_free_pages(gfp, order);
        if (first_page == 0UL)
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(iopte == NULL)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dma_addrp = (iommu->page_table_map_base +
                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
        first_page = __pa(first_page);
        while (npages--) {
                iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
                                     IOPTE_WRITE |
                                     (first_page & IOPTE_PAGE));
                iopte++;
                first_page += IO_PAGE_SIZE;
        }

        return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
                                 void *cpu, dma_addr_t dvma)
{
        struct iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, npages;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;
        iopte = iommu->page_table +
                ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, dvma, npages);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

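/* Map a single buffer for DMA: allocate IOPTEs covering the buffer,
 * grab a flush context if the hardware supports context-tagged
 * flushing, and build either streaming or consistent IOPTEs.
 * IOPTE_WRITE is set unless the mapping is DMA_TO_DEVICE only.
 */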
static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
                                    enum dma_data_direction direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr, ctx;
        u32 bus_addr, ret;
        unsigned long iopte_protection;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (unlikely(direction == DMA_NONE))
                goto bad_no_ctx;

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        base = alloc_npages(dev, iommu, npages);
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(!base))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != DMA_TO_DEVICE)
                iopte_protection |= IOPTE_WRITE;

        for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
                iopte_val(*base) = iopte_protection | base_paddr;

        return ret;

bad:
        iommu_free_ctx(iommu, ctx);
bad_no_ctx:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_ERROR_CODE;
}

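/* Push any dirty lines for this mapping out of the streaming cache.
 * When both the streaming buffer and the IOMMU support context
 * matching, we flush by context: write the context number to the
 * flush register, then keep rewriting it for as long as the match
 * register reports lines still tagged with that context.  Otherwise
 * (or if the context flush times out) we flush page by page.
 * Finally, unless the mapping was DMA_TO_DEVICE only, arm the flush
 * flag and spin (bounded) until the hardware sets it, confirming the
 * flush has completed.
 */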
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
                         u32 vaddr, unsigned long ctx, unsigned long npages,
                         enum dma_data_direction direction)
{
        int limit;

        if (strbuf->strbuf_ctxflush &&
            iommu->iommu_ctxflush) {
                unsigned long matchreg, flushreg;
                u64 val;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

                iommu_write(flushreg, ctx);
                val = iommu_read(matchreg);
                val &= 0xffff;
                if (!val)
                        goto do_flush_sync;

                while (val) {
                        if (val & 0x1)
                                iommu_write(flushreg, ctx);
                        val >>= 1;
                }
                val = iommu_read(matchreg);
                if (unlikely(val)) {
                        printk(KERN_WARNING "strbuf_flush: ctx flush "
                               "timeout matchreg[%lx] ctx[%lx]\n",
                               val, ctx);
                        goto do_page_flush;
                }
        } else {
                unsigned long i;

        do_page_flush:
                for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
                        iommu_write(strbuf->strbuf_pflush, vaddr);
        }

do_flush_sync:
        /* If the device could not have possibly put dirty data into
         * the streaming cache, no flush-flag synchronization needs
         * to be performed.
         */
        if (direction == DMA_TO_DEVICE)
                return;

        STC_FLUSHFLAG_INIT(strbuf);
        iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) iommu_read(iommu->write_complete_reg);

        limit = 100000;
        while (!STC_FLUSHFLAG_SET(strbuf)) {
                limit--;
                if (!limit)
                        break;
                udelay(1);
                rmb();
        }
        if (!limit)
                printk(KERN_WARNING "strbuf_flush: flushflag timeout "
                       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
                       vaddr, ctx, npages);
}

static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
                                size_t sz, enum dma_data_direction direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, ctx, i;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        base = iommu->page_table +
               ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled)
                strbuf_flush(strbuf, iommu, bus_addr, ctx,
                             npages, direction);

        /* Step 2: Clear out TSB entries. */
        for (i = 0; i < npages; i++)
                iopte_make_dummy(iommu, base + i);

        iommu_range_free(iommu, bus_addr, npages);

        iommu_free_ctx(iommu, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

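/* Map a scatterlist.  Each segment gets its own IOPTE range, but
 * consecutive segments whose DMA addresses turn out contiguous (and
 * whose combined length fits the device's max segment size) are merged
 * into a single output segment, so fewer than nelems entries may be
 * returned.  On any allocation failure everything mapped so far is
 * torn down and 0 is returned.
 */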
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction)
{
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot, ctx;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned int max_seg_size;
        int outcount, incount, i;
        struct strbuf *strbuf;
        struct iommu *iommu;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;
        if (nelems == 0 || !iommu)
                return 0;

        spin_lock_irqsave(&iommu->lock, flags);

        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);

        if (strbuf->strbuf_enabled)
                prot = IOPTE_STREAMING(ctx);
        else
                prot = IOPTE_CONSISTENT(ctx);
        if (direction != DMA_TO_DEVICE)
                prot |= IOPTE_WRITE;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        max_seg_size = dma_get_max_seg_size(dev);
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, slen;
                iopte_t *base;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen);
                entry = iommu_range_alloc(dev, iommu, npages, &handle);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
                                       " npages %lx\n", iommu, paddr, npages);
                        goto iommu_map_failed;
                }

                base = iommu->page_table + entry;

                /* Convert entry to a dma_addr_t */
                dma_addr = iommu->page_table_map_base +
                           (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);

                /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
                while (npages--) {
                        iopte_val(*base) = prot | paddr;
                        base++;
                        paddr += IO_PAGE_SIZE;
                }

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if ((dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                        } else {
                                outs->dma_length += s->length;
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;
        }

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        return outcount;

iommu_map_failed:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages, entry, j;
                        iopte_t *base;

                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length);
                        iommu_range_free(iommu, vaddr, npages);

                        entry = (vaddr - iommu->page_table_map_base)
                                >> IO_PAGE_SHIFT;
                        base = iommu->page_table + entry;

                        for (j = 0; j < npages; j++)
                                iopte_make_dummy(iommu, base + j);

                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
        unsigned long ctx = 0;

        if (iommu->iommu_ctxflush) {
                iopte_t *base;
                u32 bus_addr;

                bus_addr = sg->dma_address & IO_PAGE_MASK;
                base = iommu->page_table +
                       ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
        }
        return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction)
{
        unsigned long flags, ctx;
        struct scatterlist *sg;
        struct strbuf *strbuf;
        struct iommu *iommu;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        ctx = fetch_sg_ctx(iommu, sglist);

        spin_lock_irqsave(&iommu->lock, flags);

        sg = sglist;
        while (nelems--) {
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages, entry;
                iopte_t *base;
                int i;

                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len);
                iommu_range_free(iommu, dma_handle, npages);

                entry = ((dma_handle - iommu->page_table_map_base)
                         >> IO_PAGE_SHIFT);
                base = iommu->page_table + entry;

                dma_handle &= IO_PAGE_MASK;
                if (strbuf->strbuf_enabled)
                        strbuf_flush(strbuf, iommu, dma_handle, ctx,
                                     npages, direction);

                for (i = 0; i < npages; i++)
                        iopte_make_dummy(iommu, base + i);

                sg = sg_next(sg);
        }

        iommu_free_ctx(iommu, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

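/* The sync_*_for_cpu operations only need to push data out of the
 * streaming cache; consistent mappings bypass it entirely, which is
 * why these routines return immediately when no streaming buffer is
 * enabled for the device.
 */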
static void dma_4u_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t bus_addr, size_t sz,
                                       enum dma_data_direction direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages, i;
        struct scatterlist *sg, *sgprv;
        u32 bus_addr;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
        sgprv = NULL;
        for_each_sg(sglist, sg, nelems, i) {
                if (sg->dma_length == 0)
                        break;
                sgprv = sg;
        }

        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
                  - bus_addr) >> IO_PAGE_SHIFT;
        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

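/* The sun4u hardware-IOMMU implementation of the sparc64 DMA API.
 * Platforms running under the hypervisor (sun4v) install a different
 * dma_ops table elsewhere and never reach these routines.
 */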
const struct dma_ops sun4u_dma_ops = {
        .alloc_coherent         = dma_4u_alloc_coherent,
        .free_coherent          = dma_4u_free_coherent,
        .map_single             = dma_4u_map_single,
        .unmap_single           = dma_4u_unmap_single,
        .map_sg                 = dma_4u_map_sg,
        .unmap_sg               = dma_4u_unmap_sg,
        .sync_single_for_cpu    = dma_4u_sync_single_for_cpu,
        .sync_sg_for_cpu        = dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

int dma_supported(struct device *dev, u64 device_mask)
{
        struct iommu *iommu = dev->archdata.iommu;
        u64 dma_addr_mask = iommu->dma_addr_mask;

        if (device_mask >= (1UL << 32UL))
                return 0;

        if ((device_mask & dma_addr_mask) == dma_addr_mask)
                return 1;

#ifdef CONFIG_PCI
        if (dev->bus == &pci_bus_type)
                return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

        return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
        if (dev->bus == &pci_bus_type)
                return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
        return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);