/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/          mapping        iommu_               page
 *    | da   pa   va  (d)-(p)-(v)    function             type
 *  ---------------------------------------------------------------------------
 *  1 | c    c    c    1 - 1 - 1     _kmap() / _kunmap()    s
 *  2 | c    c,a  c    1 - 1 - 1     _kmalloc()/ _kfree()   s
 *  3 | c    d    c    1 - n - 1     _vmap() / _vunmap()    s
 *  4 | c    d,a  c    1 - n - 1     _vmalloc()/ _vfree()   n*
 *
 * 'iova': device iommu virtual address
 * 'da':   alias of 'iova'
 * 'pa':   physical address
 * 'va':   mpu virtual address
 *
 * 'c':    contiguous memory area
 * 'd':    discontiguous memory area
 * 'a':    anonymous memory allocation
 * '()':   optional feature
 *
 * 'n':    a normal page (4KB) size is used.
 * 's':    multiple iommu superpage (16MB, 1MB, 64KB, 4KB) sizes are used.
 *
 * '*':    not yet, but feasible.
 */
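
/*
 * Example (informative): a driver allocating anonymous, discontiguous
 * memory via pattern 4 above.  'iommu_get()'/'iommu_put()' and the
 * "isp" instance name are assumptions, not defined in this file;
 * error handling is elided.
 *
 *      struct iommu *obj = iommu_get("isp");
 *      u32 da;
 *
 *      da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *      if (!IS_ERR_VALUE(da))
 *              iommu_vfree(obj, da);
 *      iommu_put(obj);
 *
 * Passing da == 0 lets the allocator pick the device address
 * (IOVMF_DA_ANON); a nonzero da requests that fixed address
 * (IOVMF_DA_FIXED).
 */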
60 | ||
61 | static struct kmem_cache *iovm_area_cachep; | |
62 | ||
63 | /* return total bytes of sg buffers */ | |
64 | static size_t sgtable_len(const struct sg_table *sgt) | |
65 | { | |
66 | unsigned int i, total = 0; | |
67 | struct scatterlist *sg; | |
68 | ||
69 | if (!sgt) | |
70 | return 0; | |
71 | ||
72 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | |
73 | size_t bytes; | |
74 | ||
75 | bytes = sg_dma_len(sg); | |
76 | ||
77 | if (!iopgsz_ok(bytes)) { | |
78 | pr_err("%s: sg[%d] not iommu pagesize(%x)\n", | |
79 | __func__, i, bytes); | |
80 | return 0; | |
81 | } | |
82 | ||
83 | total += bytes; | |
84 | } | |
85 | ||
86 | return total; | |
87 | } | |
88 | #define sgtable_ok(x) (!!sgtable_len(x)) | |
89 | ||
/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned int sgtable_nents(size_t bytes)
{
        int i;
        unsigned int nr_entries;
        const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08x\n", __func__, bytes);
                return 0;
        }

        nr_entries = 0;
        for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
                if (bytes >= pagesize[i]) {
                        nr_entries += (bytes / pagesize[i]);
                        bytes %= pagesize[i];
                }
        }
        BUG_ON(bytes);

        return nr_entries;
}
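
/*
 * Example (informative): for bytes = SZ_16M + SZ_1M + SZ_64K, the greedy
 * loop above yields 3 entries (one 16MB, one 1MB and one 64KB superpage),
 * where a flat 4KB layout would need 4368 entries.
 */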
116 | ||
117 | /* allocate and initialize sg_table header(a kind of 'superblock') */ | |
118 | static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags) | |
119 | { | |
120 | unsigned int nr_entries; | |
121 | int err; | |
122 | struct sg_table *sgt; | |
123 | ||
124 | if (!bytes) | |
125 | return ERR_PTR(-EINVAL); | |
126 | ||
127 | if (!IS_ALIGNED(bytes, PAGE_SIZE)) | |
128 | return ERR_PTR(-EINVAL); | |
129 | ||
130 | /* FIXME: IOVMF_DA_FIXED should support 'superpages' */ | |
131 | if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) { | |
132 | nr_entries = sgtable_nents(bytes); | |
133 | if (!nr_entries) | |
134 | return ERR_PTR(-EINVAL); | |
135 | } else | |
136 | nr_entries = bytes / PAGE_SIZE; | |
137 | ||
138 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | |
139 | if (!sgt) | |
140 | return ERR_PTR(-ENOMEM); | |
141 | ||
142 | err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); | |
143 | if (err) | |
144 | return ERR_PTR(err); | |
145 | ||
146 | pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); | |
147 | ||
148 | return sgt; | |
149 | } | |
150 | ||
151 | /* free sg_table header(a kind of superblock) */ | |
152 | static void sgtable_free(struct sg_table *sgt) | |
153 | { | |
154 | if (!sgt) | |
155 | return; | |
156 | ||
157 | sg_free_table(sgt); | |
158 | kfree(sgt); | |
159 | ||
160 | pr_debug("%s: sgt:%p\n", __func__, sgt); | |
161 | } | |
162 | ||
163 | /* map 'sglist' to a contiguous mpu virtual area and return 'va' */ | |
164 | static void *vmap_sg(const struct sg_table *sgt) | |
165 | { | |
166 | u32 va; | |
167 | size_t total; | |
168 | unsigned int i; | |
169 | struct scatterlist *sg; | |
170 | struct vm_struct *new; | |
171 | const struct mem_type *mtype; | |
172 | ||
173 | mtype = get_mem_type(MT_DEVICE); | |
174 | if (!mtype) | |
175 | return ERR_PTR(-EINVAL); | |
176 | ||
177 | total = sgtable_len(sgt); | |
178 | if (!total) | |
179 | return ERR_PTR(-EINVAL); | |
180 | ||
181 | new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END); | |
182 | if (!new) | |
183 | return ERR_PTR(-ENOMEM); | |
184 | va = (u32)new->addr; | |
185 | ||
186 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | |
187 | size_t bytes; | |
188 | u32 pa; | |
189 | int err; | |
190 | ||
191 | pa = sg_phys(sg); | |
192 | bytes = sg_dma_len(sg); | |
193 | ||
194 | BUG_ON(bytes != PAGE_SIZE); | |
195 | ||
196 | err = ioremap_page(va, pa, mtype); | |
197 | if (err) | |
198 | goto err_out; | |
199 | ||
200 | va += bytes; | |
201 | } | |
202 | ||
6716bd06 SP |
203 | flush_cache_vmap((unsigned long)new->addr, |
204 | (unsigned long)(new->addr + total)); | |
69d3a84a HD |
205 | return new->addr; |
206 | ||
207 | err_out: | |
208 | WARN_ON(1); /* FIXME: cleanup some mpu mappings */ | |
209 | vunmap(new->addr); | |
210 | return ERR_PTR(-EAGAIN); | |
211 | } | |
212 | ||
static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}
238 | ||
239 | /** | |
240 | * find_iovm_area - find iovma which includes @da | |
241 | * @da: iommu device virtual address | |
242 | * | |
243 | * Find the existing iovma starting at @da | |
244 | */ | |
245 | struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da) | |
246 | { | |
247 | struct iovm_struct *area; | |
248 | ||
249 | mutex_lock(&obj->mmap_lock); | |
250 | area = __find_iovm_area(obj, da); | |
251 | mutex_unlock(&obj->mmap_lock); | |
252 | ||
253 | return area; | |
254 | } | |
255 | EXPORT_SYMBOL_GPL(find_iovm_area); | |
256 | ||
/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (flags & IOVMF_DA_ANON) {
                /*
                 * Reserve the first page for NULL
                 */
                start = PAGE_SIZE;
                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {

                if ((prev_end <= start) && (start + bytes < tmp->da_start))
                        goto found;

                if (flags & IOVMF_DA_ANON)
                        start = roundup(tmp->da_end, alignment);

                prev_end = tmp->da_end;
        }

        if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /*
         * keep ascending order of iovmas
         */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}
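
/*
 * Example (informative): with iovmas [0x1000,0x2000) and [0x9000,0xa000)
 * already on obj->mmap, an IOVMF_DA_ANON request for 0x3000 bytes is
 * placed at 0x2000: the scan above skips the reserved NULL page and the
 * first area, then finds that [0x2000,0x5000) fits before 0x9000.
 */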
330 | ||
331 | static void free_iovm_area(struct iommu *obj, struct iovm_struct *area) | |
332 | { | |
333 | size_t bytes; | |
334 | ||
335 | BUG_ON(!obj || !area); | |
336 | ||
337 | bytes = area->da_end - area->da_start; | |
338 | ||
339 | dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n", | |
340 | __func__, area->da_start, area->da_end, bytes, area->flags); | |
341 | ||
342 | list_del(&area->list); | |
343 | kmem_cache_free(iovm_area_cachep, area); | |
344 | } | |
345 | ||
346 | /** | |
347 | * da_to_va - convert (d) to (v) | |
348 | * @obj: objective iommu | |
349 | * @da: iommu device virtual address | |
350 | * @va: mpu virtual address | |
351 | * | |
352 | * Returns mpu virtual addr which corresponds to a given device virtual addr | |
353 | */ | |
354 | void *da_to_va(struct iommu *obj, u32 da) | |
355 | { | |
356 | void *va = NULL; | |
357 | struct iovm_struct *area; | |
358 | ||
359 | mutex_lock(&obj->mmap_lock); | |
360 | ||
361 | area = __find_iovm_area(obj, da); | |
362 | if (!area) { | |
363 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | |
364 | goto out; | |
365 | } | |
366 | va = area->va; | |
69d3a84a | 367 | out: |
26548900 DW |
368 | mutex_unlock(&obj->mmap_lock); |
369 | ||
69d3a84a HD |
370 | return va; |
371 | } | |
372 | EXPORT_SYMBOL_GPL(da_to_va); | |
373 | ||
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /*
                 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
                 */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);
                sg_set_page(sg, pg, bytes, 0);

                va += bytes;
        }
}
397 | ||
398 | static inline void sgtable_drain_vmalloc(struct sg_table *sgt) | |
399 | { | |
400 | /* | |
401 | * Actually this is not necessary at all, just exists for | |
ba6a1179 | 402 | * consistency of the code readability. |
69d3a84a HD |
403 | */ |
404 | BUG_ON(!sgt); | |
405 | } | |
406 | ||
static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
{
        unsigned int i;
        struct scatterlist *sg;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = iopgsz_max(len);

                BUG_ON(!iopgsz_ok(bytes));

                sg_set_buf(sg, phys_to_virt(pa), bytes);
                /*
                 * 'pa' is contiguous (linear).
                 */
                pa += bytes;
                len -= bytes;
        }
        BUG_ON(len);
}
431 | ||
432 | static inline void sgtable_drain_kmalloc(struct sg_table *sgt) | |
433 | { | |
434 | /* | |
435 | * Actually this is not necessary at all, just exists for | |
ba6a1179 | 436 | * consistency of the code readability |
69d3a84a HD |
437 | */ |
438 | BUG_ON(!sgt); | |
439 | } | |
440 | ||
441 | /* create 'da' <-> 'pa' mapping from 'sgt' */ | |
442 | static int map_iovm_area(struct iommu *obj, struct iovm_struct *new, | |
443 | const struct sg_table *sgt, u32 flags) | |
444 | { | |
445 | int err; | |
446 | unsigned int i, j; | |
447 | struct scatterlist *sg; | |
448 | u32 da = new->da_start; | |
449 | ||
20e11c2d | 450 | if (!obj || !sgt) |
69d3a84a HD |
451 | return -EINVAL; |
452 | ||
453 | BUG_ON(!sgtable_ok(sgt)); | |
454 | ||
455 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | |
456 | u32 pa; | |
457 | int pgsz; | |
458 | size_t bytes; | |
459 | struct iotlb_entry e; | |
460 | ||
461 | pa = sg_phys(sg); | |
462 | bytes = sg_dma_len(sg); | |
463 | ||
464 | flags &= ~IOVMF_PGSZ_MASK; | |
465 | pgsz = bytes_to_iopgsz(bytes); | |
466 | if (pgsz < 0) | |
467 | goto err_out; | |
468 | flags |= pgsz; | |
469 | ||
470 | pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, | |
471 | i, da, pa, bytes); | |
472 | ||
473 | iotlb_init_entry(&e, da, pa, flags); | |
474 | err = iopgtable_store_entry(obj, &e); | |
475 | if (err) | |
476 | goto err_out; | |
477 | ||
478 | da += bytes; | |
479 | } | |
480 | return 0; | |
481 | ||
482 | err_out: | |
483 | da = new->da_start; | |
484 | ||
485 | for_each_sg(sgt->sgl, sg, i, j) { | |
486 | size_t bytes; | |
487 | ||
488 | bytes = iopgtable_clear_entry(obj, da); | |
489 | ||
490 | BUG_ON(!iopgsz_ok(bytes)); | |
491 | ||
492 | da += bytes; | |
493 | } | |
494 | return err; | |
495 | } | |
496 | ||
497 | /* release 'da' <-> 'pa' mapping */ | |
498 | static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area) | |
499 | { | |
500 | u32 start; | |
501 | size_t total = area->da_end - area->da_start; | |
502 | ||
503 | BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); | |
504 | ||
505 | start = area->da_start; | |
506 | while (total > 0) { | |
507 | size_t bytes; | |
508 | ||
509 | bytes = iopgtable_clear_entry(obj, start); | |
510 | if (bytes == 0) | |
511 | bytes = PAGE_SIZE; | |
512 | else | |
513 | dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", | |
514 | __func__, start, bytes, area->flags); | |
515 | ||
516 | BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); | |
517 | ||
518 | total -= bytes; | |
519 | start += bytes; | |
520 | } | |
521 | BUG_ON(total); | |
522 | } | |
523 | ||
524 | /* template function for all unmapping */ | |
525 | static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da, | |
526 | void (*fn)(const void *), u32 flags) | |
527 | { | |
528 | struct sg_table *sgt = NULL; | |
529 | struct iovm_struct *area; | |
530 | ||
531 | if (!IS_ALIGNED(da, PAGE_SIZE)) { | |
532 | dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da); | |
533 | return NULL; | |
534 | } | |
535 | ||
536 | mutex_lock(&obj->mmap_lock); | |
537 | ||
538 | area = __find_iovm_area(obj, da); | |
539 | if (!area) { | |
540 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | |
541 | goto out; | |
542 | } | |
543 | ||
544 | if ((area->flags & flags) != flags) { | |
545 | dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__, | |
546 | area->flags); | |
547 | goto out; | |
548 | } | |
549 | sgt = (struct sg_table *)area->sgt; | |
550 | ||
551 | unmap_iovm_area(obj, area); | |
552 | ||
553 | fn(area->va); | |
554 | ||
555 | dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, | |
556 | area->da_start, da, area->da_end, | |
557 | area->da_end - area->da_start, area->flags); | |
558 | ||
559 | free_iovm_area(obj, area); | |
560 | out: | |
561 | mutex_unlock(&obj->mmap_lock); | |
562 | ||
563 | return sgt; | |
564 | } | |
565 | ||
static u32 map_iommu_region(struct iommu *obj, u32 da,
              const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
        int err = -ENOMEM;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        if (map_iovm_area(obj, new, sgt, new->flags))
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}
598 | ||
599 | static inline u32 __iommu_vmap(struct iommu *obj, u32 da, | |
600 | const struct sg_table *sgt, void *va, size_t bytes, u32 flags) | |
601 | { | |
602 | return map_iommu_region(obj, da, sgt, va, bytes, flags); | |
603 | } | |
604 | ||
605 | /** | |
606 | * iommu_vmap - (d)-(p)-(v) address mapper | |
607 | * @obj: objective iommu | |
608 | * @sgt: address of scatter gather table | |
609 | * @flags: iovma and page property | |
610 | * | |
611 | * Creates 1-n-1 mapping with given @sgt and returns @da. | |
612 | * All @sgt element must be io page size aligned. | |
613 | */ | |
614 | u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt, | |
615 | u32 flags) | |
616 | { | |
617 | size_t bytes; | |
935e4739 | 618 | void *va = NULL; |
69d3a84a HD |
619 | |
620 | if (!obj || !obj->dev || !sgt) | |
621 | return -EINVAL; | |
622 | ||
623 | bytes = sgtable_len(sgt); | |
624 | if (!bytes) | |
625 | return -EINVAL; | |
626 | bytes = PAGE_ALIGN(bytes); | |
627 | ||
935e4739 HD |
628 | if (flags & IOVMF_MMIO) { |
629 | va = vmap_sg(sgt); | |
630 | if (IS_ERR(va)) | |
631 | return PTR_ERR(va); | |
632 | } | |
69d3a84a HD |
633 | |
634 | flags &= IOVMF_HW_MASK; | |
635 | flags |= IOVMF_DISCONT; | |
636 | flags |= IOVMF_MMIO; | |
637 | flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON); | |
638 | ||
639 | da = __iommu_vmap(obj, da, sgt, va, bytes, flags); | |
640 | if (IS_ERR_VALUE(da)) | |
641 | vunmap_sg(va); | |
642 | ||
643 | return da; | |
644 | } | |
645 | EXPORT_SYMBOL_GPL(iommu_vmap); | |
646 | ||
647 | /** | |
648 | * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()' | |
649 | * @obj: objective iommu | |
650 | * @da: iommu device virtual address | |
651 | * | |
652 | * Free the iommu virtually contiguous memory area starting at | |
653 | * @da, which was returned by 'iommu_vmap()'. | |
654 | */ | |
655 | struct sg_table *iommu_vunmap(struct iommu *obj, u32 da) | |
656 | { | |
657 | struct sg_table *sgt; | |
658 | /* | |
659 | * 'sgt' is allocated before 'iommu_vmalloc()' is called. | |
660 | * Just returns 'sgt' to the caller to free | |
661 | */ | |
662 | sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO); | |
663 | if (!sgt) | |
664 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | |
665 | return sgt; | |
666 | } | |
667 | EXPORT_SYMBOL_GPL(iommu_vunmap); | |
668 | ||
669 | /** | |
670 | * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper | |
671 | * @obj: objective iommu | |
672 | * @da: contiguous iommu virtual memory | |
673 | * @bytes: allocation size | |
674 | * @flags: iovma and page property | |
675 | * | |
676 | * Allocate @bytes linearly and creates 1-n-1 mapping and returns | |
677 | * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set. | |
678 | */ | |
679 | u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) | |
680 | { | |
681 | void *va; | |
682 | struct sg_table *sgt; | |
683 | ||
684 | if (!obj || !obj->dev || !bytes) | |
685 | return -EINVAL; | |
686 | ||
687 | bytes = PAGE_ALIGN(bytes); | |
688 | ||
689 | va = vmalloc(bytes); | |
690 | if (!va) | |
691 | return -ENOMEM; | |
692 | ||
693 | sgt = sgtable_alloc(bytes, flags); | |
694 | if (IS_ERR(sgt)) { | |
695 | da = PTR_ERR(sgt); | |
696 | goto err_sgt_alloc; | |
697 | } | |
698 | sgtable_fill_vmalloc(sgt, va); | |
699 | ||
700 | flags &= IOVMF_HW_MASK; | |
701 | flags |= IOVMF_DISCONT; | |
702 | flags |= IOVMF_ALLOC; | |
703 | flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON); | |
704 | ||
705 | da = __iommu_vmap(obj, da, sgt, va, bytes, flags); | |
706 | if (IS_ERR_VALUE(da)) | |
707 | goto err_iommu_vmap; | |
708 | ||
709 | return da; | |
710 | ||
711 | err_iommu_vmap: | |
712 | sgtable_drain_vmalloc(sgt); | |
713 | sgtable_free(sgt); | |
714 | err_sgt_alloc: | |
715 | vfree(va); | |
716 | return da; | |
717 | } | |
718 | EXPORT_SYMBOL_GPL(iommu_vmalloc); | |
719 | ||
720 | /** | |
721 | * iommu_vfree - release memory allocated by 'iommu_vmalloc()' | |
722 | * @obj: objective iommu | |
723 | * @da: iommu device virtual address | |
724 | * | |
725 | * Frees the iommu virtually continuous memory area starting at | |
726 | * @da, as obtained from 'iommu_vmalloc()'. | |
727 | */ | |
728 | void iommu_vfree(struct iommu *obj, const u32 da) | |
729 | { | |
730 | struct sg_table *sgt; | |
731 | ||
732 | sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC); | |
733 | if (!sgt) | |
734 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | |
735 | sgtable_free(sgt); | |
736 | } | |
737 | EXPORT_SYMBOL_GPL(iommu_vfree); | |
738 | ||
static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
                        size_t bytes, u32 flags)
{
        struct sg_table *sgt;

        sgt = sgtable_alloc(bytes, flags);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        sgtable_fill_kmalloc(sgt, pa, bytes);

        da = map_iommu_region(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da)) {
                sgtable_drain_kmalloc(sgt);
                sgtable_free(sgt);
        }

        return da;
}
758 | ||
759 | /** | |
760 | * iommu_kmap - (d)-(p)-(v) address mapper | |
761 | * @obj: objective iommu | |
762 | * @da: contiguous iommu virtual memory | |
763 | * @pa: contiguous physical memory | |
764 | * @flags: iovma and page property | |
765 | * | |
766 | * Creates 1-1-1 mapping and returns @da again, which can be | |
767 | * adjusted if 'IOVMF_DA_ANON' is set. | |
768 | */ | |
769 | u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes, | |
770 | u32 flags) | |
771 | { | |
772 | void *va; | |
773 | ||
774 | if (!obj || !obj->dev || !bytes) | |
775 | return -EINVAL; | |
776 | ||
777 | bytes = PAGE_ALIGN(bytes); | |
778 | ||
779 | va = ioremap(pa, bytes); | |
780 | if (!va) | |
781 | return -ENOMEM; | |
782 | ||
783 | flags &= IOVMF_HW_MASK; | |
784 | flags |= IOVMF_LINEAR; | |
785 | flags |= IOVMF_MMIO; | |
786 | flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON); | |
787 | ||
788 | da = __iommu_kmap(obj, da, pa, va, bytes, flags); | |
789 | if (IS_ERR_VALUE(da)) | |
790 | iounmap(va); | |
791 | ||
792 | return da; | |
793 | } | |
794 | EXPORT_SYMBOL_GPL(iommu_kmap); | |
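
/*
 * Example (informative): mapping an already-contiguous physical region
 * at a device address (pattern 1 above).  'MY_DA' and 'MY_PA' are
 * made-up placeholders, not constants from this codebase:
 *
 *      da = iommu_kmap(obj, MY_DA, MY_PA, SZ_64K, 0);
 *      if (!IS_ERR_VALUE(da))
 *              iommu_kunmap(obj, da);
 */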
795 | ||
796 | /** | |
797 | * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()' | |
798 | * @obj: objective iommu | |
799 | * @da: iommu device virtual address | |
800 | * | |
801 | * Frees the iommu virtually contiguous memory area starting at | |
802 | * @da, which was passed to and was returned by'iommu_kmap()'. | |
803 | */ | |
804 | void iommu_kunmap(struct iommu *obj, u32 da) | |
805 | { | |
806 | struct sg_table *sgt; | |
807 | typedef void (*func_t)(const void *); | |
808 | ||
809 | sgt = unmap_vm_area(obj, da, (func_t)__iounmap, | |
810 | IOVMF_LINEAR | IOVMF_MMIO); | |
811 | if (!sgt) | |
812 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | |
813 | sgtable_free(sgt); | |
814 | } | |
815 | EXPORT_SYMBOL_GPL(iommu_kunmap); | |
816 | ||
817 | /** | |
818 | * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper | |
819 | * @obj: objective iommu | |
820 | * @da: contiguous iommu virtual memory | |
821 | * @bytes: bytes for allocation | |
822 | * @flags: iovma and page property | |
823 | * | |
824 | * Allocate @bytes linearly and creates 1-1-1 mapping and returns | |
825 | * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set. | |
826 | */ | |
827 | u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) | |
828 | { | |
829 | void *va; | |
830 | u32 pa; | |
831 | ||
832 | if (!obj || !obj->dev || !bytes) | |
833 | return -EINVAL; | |
834 | ||
835 | bytes = PAGE_ALIGN(bytes); | |
836 | ||
837 | va = kmalloc(bytes, GFP_KERNEL | GFP_DMA); | |
838 | if (!va) | |
839 | return -ENOMEM; | |
840 | pa = virt_to_phys(va); | |
841 | ||
842 | flags &= IOVMF_HW_MASK; | |
843 | flags |= IOVMF_LINEAR; | |
844 | flags |= IOVMF_ALLOC; | |
845 | flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON); | |
846 | ||
847 | da = __iommu_kmap(obj, da, pa, va, bytes, flags); | |
848 | if (IS_ERR_VALUE(da)) | |
849 | kfree(va); | |
850 | ||
851 | return da; | |
852 | } | |
853 | EXPORT_SYMBOL_GPL(iommu_kmalloc); | |
854 | ||
855 | /** | |
856 | * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()' | |
857 | * @obj: objective iommu | |
858 | * @da: iommu device virtual address | |
859 | * | |
860 | * Frees the iommu virtually contiguous memory area starting at | |
861 | * @da, which was passed to and was returned by'iommu_kmalloc()'. | |
862 | */ | |
863 | void iommu_kfree(struct iommu *obj, u32 da) | |
864 | { | |
865 | struct sg_table *sgt; | |
866 | ||
867 | sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC); | |
868 | if (!sgt) | |
869 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | |
870 | sgtable_free(sgt); | |
871 | } | |
872 | EXPORT_SYMBOL_GPL(iommu_kfree); | |
873 | ||
874 | ||
875 | static int __init iovmm_init(void) | |
876 | { | |
877 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | |
878 | struct kmem_cache *p; | |
879 | ||
880 | p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, | |
881 | flags, NULL); | |
882 | if (!p) | |
883 | return -ENOMEM; | |
884 | iovm_area_cachep = p; | |
885 | ||
886 | return 0; | |
887 | } | |
888 | module_init(iovmm_init); | |
889 | ||
890 | static void __exit iovmm_exit(void) | |
891 | { | |
892 | kmem_cache_destroy(iovm_area_cachep); | |
893 | } | |
894 | module_exit(iovmm_exit); | |
895 | ||
896 | MODULE_DESCRIPTION("omap iommu: simple virtual address space management"); | |
897 | MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); | |
898 | MODULE_LICENSE("GPL v2"); |