/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include <plat/iopgtable.h>

static struct kmem_cache *iovm_area_cachep;

/* return the offset of the first scatterlist entry in a sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
	if (!sgt || !sgt->nents)
		return 0;

	return sgt->sgl->offset;
}

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%zu %u)\n",
			       __func__, i, bytes, sg->offset);
			return 0;
		}

		if (i && sg->offset) {
			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
			       __func__, i);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))
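
/*
 * return the largest supported iommu page size (16MB, 1MB, 64KB or 4KB)
 * that @addr is aligned to, or 0 if @addr is not even 4KB aligned
 */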
static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
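/*
 * e.g. (illustrative): 17MB starting at da = pa = 0 yields two entries,
 * one 16MB superpage followed by one 1MB entry.
 */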
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08zx\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		nr_entries++;
		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else {
		nr_entries = bytes / PAGE_SIZE;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
					    const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%zx) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * omap_find_iovm_area - find iovma which includes @da
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in obj's mmap list, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
		   obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%zx) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%zx) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}

static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%zx) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Returns the mpu virtual addr which corresponds to a given
 * iommu device virtual addr, or NULL if no iovma covers @da.
 */
void *omap_da_to_va(struct omap_iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);
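
/*
 * Example (a minimal sketch, not part of this driver; assumes 'obj' is a
 * valid omap_iommu handle and 'da' was previously mapped on it):
 *
 *	void *va = omap_da_to_va(obj, da);
 *	if (!va)
 *		return -EINVAL;
 */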

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * This is not strictly necessary; it only exists for
	 * consistency and code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;
	int order;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0)
			goto err_out;

		order = get_order(bytes);

		pr_debug("%s: [%d] %08x %08x(%zx)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, order, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	/* unwind the entries that were mapped before the failure */
	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = sg->length + sg->offset;
		order = get_order(bytes);

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, order);

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
			    struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i, err;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		int order;

		bytes = sg->length + sg->offset;
		order = get_order(bytes);

		err = iommu_unmap(domain, start, order);
		if (err < 0)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%zx) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
			    u32 da, const struct sg_table *sgt, void *va,
			    size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%zx) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
	     u32 da, const struct sg_table *sgt,
	     void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap - (d)-(p)-(v) address mapper
 * @domain: iommu domain
 * @obj: objective iommu
 * @da: iommu device virtual address
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
		    const struct sg_table *sgt, u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);

/**
 * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain: iommu domain
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	da &= PAGE_MASK;
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
			    IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
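
/*
 * Usage example for omap_iommu_vmap()/omap_iommu_vunmap() (a minimal
 * sketch, not part of this driver; 'domain', 'obj' and 'sgt' are assumed
 * to be an attached iommu domain, a valid omap_iommu handle and a
 * populated sg_table whose entries are io page size aligned).  Passing
 * da = 0 without IOVMF_DA_FIXED lets the allocator pick the device
 * address:
 *
 *	u32 da;
 *
 *	da = omap_iommu_vmap(domain, obj, 0, sgt, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	sgt = omap_iommu_vunmap(domain, obj, da);
 *
 * omap_iommu_vunmap() hands the same sg_table back for the caller to free.
 */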

/**
 * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @domain: iommu domain
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
		   size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);

/**
 * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
 * @domain: iommu domain
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
		      const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
			    IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);
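
/*
 * Usage example for omap_iommu_vmalloc()/omap_iommu_vfree() (a minimal
 * sketch, not part of this driver; 'domain' and 'obj' are assumed to be
 * an attached iommu domain and a valid omap_iommu handle).  With da = 0
 * and no IOVMF_DA_FIXED, the device address is picked by the allocator:
 *
 *	u32 da;
 *
 *	da = omap_iommu_vmalloc(domain, obj, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	omap_iommu_vfree(domain, obj, da);
 */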

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");