Commit | Line | Data |
---|---|---|
69d3a84a HD |
1 | /* |
2 | * omap iommu: simple virtual address space management | |
3 | * | |
4 | * Copyright (C) 2008-2009 Nokia Corporation | |
5 | * | |
6 | * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
13 | #include <linux/err.h> | |
5a0e3ad6 | 14 | #include <linux/slab.h> |
69d3a84a HD |
15 | #include <linux/vmalloc.h> |
16 | #include <linux/device.h> | |
17 | #include <linux/scatterlist.h> | |
f626b52d | 18 | #include <linux/iommu.h> |
69d3a84a HD |
19 | |
20 | #include <asm/cacheflush.h> | |
21 | #include <asm/mach/map.h> | |
22 | ||
ce491cf8 TL |
23 | #include <plat/iommu.h> |
24 | #include <plat/iovmm.h> | |
69d3a84a | 25 | |
fcf3a6ef | 26 | #include <plat/iopgtable.h> |
69d3a84a | 27 | |
69d3a84a HD |
28 | static struct kmem_cache *iovm_area_cachep; |
29 | ||
30 | /* return total bytes of sg buffers */ | |
31 | static size_t sgtable_len(const struct sg_table *sgt) | |
32 | { | |
33 | unsigned int i, total = 0; | |
34 | struct scatterlist *sg; | |
35 | ||
36 | if (!sgt) | |
37 | return 0; | |
38 | ||
39 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | |
40 | size_t bytes; | |
41 | ||
66cf402b | 42 | bytes = sg->length; |
69d3a84a HD |
43 | |
44 | if (!iopgsz_ok(bytes)) { | |
45 | pr_err("%s: sg[%d] not iommu pagesize(%x)\n", | |
46 | __func__, i, bytes); | |
47 | return 0; | |
48 | } | |
49 | ||
50 | total += bytes; | |
51 | } | |
52 | ||
53 | return total; | |
54 | } | |
55 | #define sgtable_ok(x) (!!sgtable_len(x)) | |
56 | ||
ad108121 GLF |
57 | static unsigned max_alignment(u32 addr) |
58 | { | |
59 | int i; | |
60 | unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, }; | |
61 | for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++) | |
62 | ; | |
63 | return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0; | |
64 | } | |
65 | ||
69d3a84a HD |
66 | /* |
67 | * calculate the optimal number sg elements from total bytes based on | |
68 | * iommu superpages | |
69 | */ | |
ad108121 | 70 | static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa) |
69d3a84a | 71 | { |
ad108121 | 72 | unsigned nr_entries = 0, ent_sz; |
69d3a84a HD |
73 | |
74 | if (!IS_ALIGNED(bytes, PAGE_SIZE)) { | |
75 | pr_err("%s: wrong size %08x\n", __func__, bytes); | |
76 | return 0; | |
77 | } | |
78 | ||
ad108121 GLF |
79 | while (bytes) { |
80 | ent_sz = max_alignment(da | pa); | |
81 | ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes)); | |
82 | nr_entries++; | |
83 | da += ent_sz; | |
84 | pa += ent_sz; | |
85 | bytes -= ent_sz; | |
69d3a84a | 86 | } |
69d3a84a HD |
87 | |
88 | return nr_entries; | |
89 | } | |
90 | ||
91 | /* allocate and initialize sg_table header(a kind of 'superblock') */ | |
ad108121 GLF |
92 | static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags, |
93 | u32 da, u32 pa) | |
69d3a84a HD |
94 | { |
95 | unsigned int nr_entries; | |
96 | int err; | |
97 | struct sg_table *sgt; | |
98 | ||
99 | if (!bytes) | |
100 | return ERR_PTR(-EINVAL); | |
101 | ||
102 | if (!IS_ALIGNED(bytes, PAGE_SIZE)) | |
103 | return ERR_PTR(-EINVAL); | |
104 | ||
ad108121 GLF |
105 | if (flags & IOVMF_LINEAR) { |
106 | nr_entries = sgtable_nents(bytes, da, pa); | |
69d3a84a HD |
107 | if (!nr_entries) |
108 | return ERR_PTR(-EINVAL); | |
109 | } else | |
110 | nr_entries = bytes / PAGE_SIZE; | |
111 | ||
112 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | |
113 | if (!sgt) | |
114 | return ERR_PTR(-ENOMEM); | |
115 | ||
116 | err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); | |
7f1225bd S |
117 | if (err) { |
118 | kfree(sgt); | |
69d3a84a | 119 | return ERR_PTR(err); |
7f1225bd | 120 | } |
69d3a84a HD |
121 | |
122 | pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); | |
123 | ||
124 | return sgt; | |
125 | } | |
126 | ||
/* free sg_table header(a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	/* log before freeing: reading 'sgt' after kfree() is formally UB */
	pr_debug("%s: sgt:%p\n", __func__, sgt);

	sg_free_table(sgt);
	kfree(sgt);
}
138 | ||
/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	/* also validates that every element is an iommu page size */
	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	/* reserve a hole in the mpu vmalloc space for the whole table */
	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg->length;

		/* only 4KiB elements are accepted here (no superpages) */
		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
				(unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}
188 | ||
/* counterpart of vmap_sg(): release the mpu virtual mapping */
static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}
193 | ||
194 | static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da) | |
195 | { | |
196 | struct iovm_struct *tmp; | |
197 | ||
198 | list_for_each_entry(tmp, &obj->mmap, list) { | |
199 | if ((da >= tmp->da_start) && (da < tmp->da_end)) { | |
200 | size_t len; | |
201 | ||
202 | len = tmp->da_end - tmp->da_start; | |
203 | ||
204 | dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", | |
205 | __func__, tmp->da_start, da, tmp->da_end, len, | |
206 | tmp->flags); | |
207 | ||
208 | return tmp; | |
209 | } | |
210 | } | |
211 | ||
212 | return NULL; | |
213 | } | |
214 | ||
/**
 * find_iovm_area - find iovma which includes @da
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	/* obj->mmap list is protected by obj->mmap_lock */
	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);
232 | ||
/*
 * This finds the hole(area) which fits the requested address and len
 * in iovmas mmap, and returns the new allocated iovma.
 * Caller must hold obj->mmap_lock (see map_iommu_region()).
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		/* linear areas get superpage alignment for fewer entries */
		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
					obj->da_end - start < bytes) {
		/* fixed address requested outside the device aperture */
		return ERR_PTR(-EINVAL);
	}

	/* empty list: the whole aperture from 'start' is a hole */
	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	/* walk the ascending-ordered list looking for a big-enough gap */
	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		/* bump 'start' past this area (only if da is not fixed) */
		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	/* room left after the last area? */
	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
311 | ||
312 | static void free_iovm_area(struct iommu *obj, struct iovm_struct *area) | |
313 | { | |
314 | size_t bytes; | |
315 | ||
316 | BUG_ON(!obj || !area); | |
317 | ||
318 | bytes = area->da_end - area->da_start; | |
319 | ||
320 | dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n", | |
321 | __func__, area->da_start, area->da_end, bytes, area->flags); | |
322 | ||
323 | list_del(&area->list); | |
324 | kmem_cache_free(iovm_area_cachep, area); | |
325 | } | |
326 | ||
327 | /** | |
328 | * da_to_va - convert (d) to (v) | |
329 | * @obj: objective iommu | |
330 | * @da: iommu device virtual address | |
331 | * @va: mpu virtual address | |
332 | * | |
333 | * Returns mpu virtual addr which corresponds to a given device virtual addr | |
334 | */ | |
335 | void *da_to_va(struct iommu *obj, u32 da) | |
336 | { | |
337 | void *va = NULL; | |
338 | struct iovm_struct *area; | |
339 | ||
340 | mutex_lock(&obj->mmap_lock); | |
341 | ||
342 | area = __find_iovm_area(obj, da); | |
343 | if (!area) { | |
344 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | |
345 | goto out; | |
346 | } | |
347 | va = area->va; | |
69d3a84a | 348 | out: |
26548900 DW |
349 | mutex_unlock(&obj->mmap_lock); |
350 | ||
69d3a84a HD |
351 | return va; |
352 | } | |
353 | EXPORT_SYMBOL_GPL(da_to_va); | |
354 | ||
355 | static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) | |
356 | { | |
357 | unsigned int i; | |
358 | struct scatterlist *sg; | |
359 | void *va = _va; | |
360 | void *va_end; | |
361 | ||
362 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | |
363 | struct page *pg; | |
364 | const size_t bytes = PAGE_SIZE; | |
365 | ||
366 | /* | |
367 | * iommu 'superpage' isn't supported with 'iommu_vmalloc()' | |
368 | */ | |
369 | pg = vmalloc_to_page(va); | |
370 | BUG_ON(!pg); | |
371 | sg_set_page(sg, pg, bytes, 0); | |
372 | ||
373 | va += bytes; | |
374 | } | |
375 | ||
376 | va_end = _va + PAGE_SIZE * i; | |
69d3a84a HD |
377 | } |
378 | ||
/* undo sgtable_fill_vmalloc(); the pages belong to vmalloc, nothing to free */
static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}
387 | ||
69d3a84a | 388 | /* create 'da' <-> 'pa' mapping from 'sgt' */ |
f626b52d OBC |
389 | static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new, |
390 | const struct sg_table *sgt, u32 flags) | |
69d3a84a HD |
391 | { |
392 | int err; | |
393 | unsigned int i, j; | |
394 | struct scatterlist *sg; | |
395 | u32 da = new->da_start; | |
f626b52d | 396 | int order; |
69d3a84a | 397 | |
f626b52d | 398 | if (!domain || !sgt) |
69d3a84a HD |
399 | return -EINVAL; |
400 | ||
401 | BUG_ON(!sgtable_ok(sgt)); | |
402 | ||
403 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | |
404 | u32 pa; | |
69d3a84a | 405 | size_t bytes; |
69d3a84a HD |
406 | |
407 | pa = sg_phys(sg); | |
66cf402b | 408 | bytes = sg->length; |
69d3a84a HD |
409 | |
410 | flags &= ~IOVMF_PGSZ_MASK; | |
f626b52d OBC |
411 | |
412 | if (bytes_to_iopgsz(bytes) < 0) | |
69d3a84a | 413 | goto err_out; |
f626b52d OBC |
414 | |
415 | order = get_order(bytes); | |
69d3a84a HD |
416 | |
417 | pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, | |
418 | i, da, pa, bytes); | |
419 | ||
f626b52d | 420 | err = iommu_map(domain, da, pa, order, flags); |
69d3a84a HD |
421 | if (err) |
422 | goto err_out; | |
423 | ||
424 | da += bytes; | |
425 | } | |
426 | return 0; | |
427 | ||
428 | err_out: | |
429 | da = new->da_start; | |
430 | ||
431 | for_each_sg(sgt->sgl, sg, i, j) { | |
432 | size_t bytes; | |
433 | ||
f626b52d OBC |
434 | bytes = sg->length; |
435 | order = get_order(bytes); | |
69d3a84a | 436 | |
f626b52d OBC |
437 | /* ignore failures.. we're already handling one */ |
438 | iommu_unmap(domain, da, order); | |
69d3a84a HD |
439 | |
440 | da += bytes; | |
441 | } | |
442 | return err; | |
443 | } | |
444 | ||
/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
			    struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i, err;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		int order;

		bytes = sg->length;
		order = get_order(bytes);

		err = iommu_unmap(domain, start, order);
		/* stop on first failure; trailing BUG_ON(total) will fire */
		if (err)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		/* account every unmapped byte; 'total' must reach zero */
		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}
480 | ||
/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	/* the area must carry at least the flags the caller asked for */
	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	/* tear down the iommu page table entries first... */
	unmap_iovm_area(domain, obj, area);

	/* ...then release the mpu side via the caller-supplied hook */
	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}
523 | ||
f626b52d OBC |
524 | static u32 map_iommu_region(struct iommu_domain *domain, struct iommu *obj, |
525 | u32 da, const struct sg_table *sgt, void *va, | |
526 | size_t bytes, u32 flags) | |
69d3a84a HD |
527 | { |
528 | int err = -ENOMEM; | |
529 | struct iovm_struct *new; | |
530 | ||
531 | mutex_lock(&obj->mmap_lock); | |
532 | ||
533 | new = alloc_iovm_area(obj, da, bytes, flags); | |
534 | if (IS_ERR(new)) { | |
535 | err = PTR_ERR(new); | |
536 | goto err_alloc_iovma; | |
537 | } | |
538 | new->va = va; | |
539 | new->sgt = sgt; | |
540 | ||
f626b52d | 541 | if (map_iovm_area(domain, new, sgt, new->flags)) |
69d3a84a HD |
542 | goto err_map; |
543 | ||
544 | mutex_unlock(&obj->mmap_lock); | |
545 | ||
546 | dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n", | |
547 | __func__, new->da_start, bytes, new->flags, va); | |
548 | ||
549 | return new->da_start; | |
550 | ||
551 | err_map: | |
552 | free_iovm_area(obj, new); | |
553 | err_alloc_iovma: | |
554 | mutex_unlock(&obj->mmap_lock); | |
555 | return err; | |
556 | } | |
557 | ||
/* trivial wrapper around map_iommu_region() */
static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
			       u32 da, const struct sg_table *sgt,
			       void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}
564 | ||
/**
 * iommu_vmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt element must be io page size aligned.
 */
u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
	       const struct sg_table *sgt, u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	/* also rejects tables whose elements are not iommu page sizes */
	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	/* mpu-side mapping is created only when the caller asked for MMIO */
	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	/*
	 * NOTE(review): IOVMF_MMIO is forced below even when the caller did
	 * not pass it (leaving va == NULL) — confirm this is intended.
	 */
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
604 | ||
605 | /** | |
606 | * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()' | |
607 | * @obj: objective iommu | |
608 | * @da: iommu device virtual address | |
609 | * | |
610 | * Free the iommu virtually contiguous memory area starting at | |
611 | * @da, which was returned by 'iommu_vmap()'. | |
612 | */ | |
f626b52d OBC |
613 | struct sg_table * |
614 | iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da) | |
69d3a84a HD |
615 | { |
616 | struct sg_table *sgt; | |
617 | /* | |
618 | * 'sgt' is allocated before 'iommu_vmalloc()' is called. | |
619 | * Just returns 'sgt' to the caller to free | |
620 | */ | |
f626b52d OBC |
621 | sgt = unmap_vm_area(domain, obj, da, vunmap_sg, |
622 | IOVMF_DISCONT | IOVMF_MMIO); | |
69d3a84a HD |
623 | if (!sgt) |
624 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | |
625 | return sgt; | |
626 | } | |
627 | EXPORT_SYMBOL_GPL(iommu_vunmap); | |
628 | ||
/**
 * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocate @bytes linearly and creates 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
		  size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	/* backing memory from vmalloc: physically discontiguous */
	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	/* describe the vmalloc pages in the sg table */
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);
678 | ||
679 | /** | |
680 | * iommu_vfree - release memory allocated by 'iommu_vmalloc()' | |
681 | * @obj: objective iommu | |
682 | * @da: iommu device virtual address | |
683 | * | |
684 | * Frees the iommu virtually continuous memory area starting at | |
685 | * @da, as obtained from 'iommu_vmalloc()'. | |
686 | */ | |
f626b52d | 687 | void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da) |
69d3a84a HD |
688 | { |
689 | struct sg_table *sgt; | |
690 | ||
f626b52d OBC |
691 | sgt = unmap_vm_area(domain, obj, da, vfree, |
692 | IOVMF_DISCONT | IOVMF_ALLOC); | |
69d3a84a HD |
693 | if (!sgt) |
694 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | |
695 | sgtable_free(sgt); | |
696 | } | |
697 | EXPORT_SYMBOL_GPL(iommu_vfree); | |
698 | ||
69d3a84a HD |
699 | static int __init iovmm_init(void) |
700 | { | |
701 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | |
702 | struct kmem_cache *p; | |
703 | ||
704 | p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, | |
705 | flags, NULL); | |
706 | if (!p) | |
707 | return -ENOMEM; | |
708 | iovm_area_cachep = p; | |
709 | ||
710 | return 0; | |
711 | } | |
712 | module_init(iovmm_init); | |
713 | ||
static void __exit iovmm_exit(void)
{
	/* release the iovm_struct slab cache created in iovmm_init() */
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);
719 | ||
720 | MODULE_DESCRIPTION("omap iommu: simple virtual address space management"); | |
721 | MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); | |
722 | MODULE_LICENSE("GPL v2"); |