/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include <plat/iopgtable.h>

#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

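/*
 * Note: for_each_iotlb_cr() walks TLB entries 0..n-1 by pointing the
 * victim field of MMU_LOCK at each index and reading the CAM/RAM pair
 * back (see __iotlb_read_cr() below); the TLB contents themselves are
 * not modified by the iteration.
 */
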
/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct iommu *iommu_dev;
	spinlock_t lock;
};

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * install_iommu_arch - Install architecture-specific iommu functions
 * @ops:	a pointer to architecture-specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among
 * the omap series. This interface installs such an iommu algorithm.
 **/
int install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(install_iommu_arch);

/**
 * uninstall_iommu_arch - Uninstall architecture-specific iommu functions
 * @ops:	a pointer to architecture-specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(uninstall_iommu_arch);

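/*
 * A minimal sketch of how an architecture backend is expected to hook
 * itself up. The "omap9" names and the version value are hypothetical;
 * the callback fields mirror the hooks this file invokes through
 * arch_iommu:
 *
 *	static const struct iommu_functions omap9_iommu_ops = {
 *		.version	= 2,
 *		.enable		= omap9_iommu_enable,
 *		.disable	= omap9_iommu_disable,
 *		.fault_isr	= omap9_iommu_fault_isr,
 *		.tlb_read_cr	= omap9_tlb_read_cr,
 *		.tlb_load_cr	= omap9_tlb_load_cr,
 *		.cr_to_e	= omap9_cr_to_e,
 *		.cr_to_virt	= omap9_cr_to_virt,
 *		.cr_valid	= omap9_cr_valid,
 *		.alloc_cr	= omap9_alloc_cr,
 *		.get_pte_attr	= omap9_get_pte_attr,
 *		.save_ctx	= omap9_iommu_save_ctx,
 *		.restore_ctx	= omap9_iommu_restore_ctx,
 *		.dump_cr	= omap9_dump_cr,
 *		.dump_ctx	= omap9_dump_ctx,
 *	};
 *
 *	static int __init omap9_iommu_init(void)
 *	{
 *		return install_iommu_arch(&omap9_iommu_ops);
 *	}
 */
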
/**
 * iommu_save_ctx - Save registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_save_ctx(struct iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_save_ctx);

/**
 * iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_restore_ctx(struct iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_restore_ctx);

/**
 * iommu_arch_version - Return running iommu arch version
 **/
u32 iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(iommu_arch_version);

static int iommu_enable(struct iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (!arch_iommu)
		return -ENODEV;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}

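/*
 * The MMU's interface clock must be running for any register access;
 * the helpers in this file therefore bracket their register traffic
 * with clk_enable()/clk_disable() on obj->clk rather than keeping the
 * clock on permanently.
 */
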
/*
 * TLB operations
 */
void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

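/*
 * MMU_LOCK semantics, as used below: entries with index < base are
 * "preserved" and never selected for replacement, while vict names the
 * entry that the next MMU_LD_TLB load will overwrite. Programming vict
 * is also how a specific entry is selected for reading back.
 */
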
static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

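/*
 * With PREFETCH_IOTLB defined, storing a page table entry also pre-loads
 * a matching TLB entry so the first access does not pay for a hardware
 * table walk; without it, load_iotlb_entry() is a no-op and misses are
 * resolved by the MMU's table walker alone.
 */
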
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}

#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);

static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(dump_tlb_entries);

int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 * H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

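/*
 * The "mcr p15, 0, %0, c7, c10, 1" above is the ARM "clean D-cache line
 * by MVA" operation: the IOMMU's hardware table walker fetches
 * descriptors straight from memory, so every page-table write must be
 * cleaned out of the CPU data cache, one L1_CACHE_BYTES-sized line at a
 * time, before the walker can observe it.
 */
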
static void iopte_free(u32 *iopte)
{
	/* Note: freed ioptes must be clean, ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* an L2 table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

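/*
 * iopte_alloc() drops obj->page_table_lock around the GFP_KERNEL slab
 * allocation (which may sleep), then re-checks *iopgd: if another CPU
 * installed an L2 table in the meantime, the freshly allocated one is
 * simply freed again. Callers must hold the lock on entry and exit.
 */
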
static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

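/*
 * The 16x replication above follows the ARM short-descriptor format:
 * a 16MB supersection occupies 16 consecutive first-level slots and a
 * 64KB large page occupies 16 consecutive second-level slots, all
 * carrying the same physical base and attributes.
 */
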
static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(iopgtable_store_entry);

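/*
 * Ordering matters in iopgtable_store_entry(): any stale TLB entry for
 * e->da is flushed first, then the page table is updated, and only on
 * success is the new translation optionally pre-loaded into the TLB.
 */
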
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

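/*
 * iopgtable_clear_entry() returns the number of bytes actually unmapped
 * (4KB, 64KB, 1MB or 16MB, or 0 for a hole), which lets callers such as
 * omap_iommu_unmap() verify that the region torn down matches the size
 * they asked for.
 */
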
static void iopgtable_clear_entry_all(struct iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct iommu *obj = data;

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
			"*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
		iopte, *iopte);

	return IRQ_NONE;
}

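/*
 * Fault handling above proceeds in three steps: read back the fault
 * status (a zero status means a spurious or already-handled interrupt
 * on this shared line), offer the fault to the client's isr callback,
 * which may resolve it by dynamically loading a missing TLB/PTE entry,
 * and finally, for unhandled faults, disable the MMU and dump the
 * offending page-table walk.
 */
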
static int device_match_by_alias(struct device *dev, void *data)
{
	struct iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * omap_find_iommu_device() - find an omap iommu device by name
 * @name:	name of the iommu device
 *
 * The generic iommu API requires the caller to provide the device
 * it wishes to attach to a certain iommu domain.
 *
 * Drivers generally should not bother with this as it should just
 * be taken care of by the DMA-API using dev_archdata.
 *
 * This function is provided as an interim solution until the latter
 * materializes, and omap3isp is fully migrated to the DMA-API.
 */
struct device *omap_find_iommu_device(const char *name)
{
	return driver_find_device(&omap_iommu_driver.driver, NULL,
				  (void *)name,
				  device_match_by_alias);
}
EXPORT_SYMBOL_GPL(omap_find_iommu_device);

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @dev:	target omap iommu device
 * @iopgd:	page table
 **/
static struct iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
{
	int err = -ENOMEM;
	struct iommu *obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner))
		goto err_module;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

int iommu_set_isr(const char *name,
		  int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
			     void *priv),
		  void *isr_priv)
{
	struct device *dev;
	struct iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return -ENODEV;

	obj = to_iommu(dev);
	spin_lock(&obj->iommu_lock);
	if (obj->refcount != 0) {
		spin_unlock(&obj->iommu_lock);
		return -EBUSY;
	}
	obj->isr = isr;
	obj->isr_priv = isr_priv;
	spin_unlock(&obj->iommu_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_set_isr);

/*
 * OMAP Device MMU (IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, int order, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 flags;
	int ret;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%zx\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = iopgtable_store_entry(oiommu, &e);
	if (ret) {
		dev_err(dev, "iopgtable_store_entry failed: %d\n", ret);
		return ret;
	}

	return 0;
}

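/*
 * A note on the generic API of this era: map/unmap express sizes as a
 * page 'order', so bytes = PAGE_SIZE << order, and the result must be
 * exactly one supported OMAP page size (4KB, 64KB, 1MB or 16MB);
 * bytes_to_iopgsz() rejects everything else.
 */
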
static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			    int order)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	size_t ret;

	dev_dbg(dev, "unmapping da 0x%lx size 0x%zx\n", da, bytes);

	ret = iopgtable_clear_entry(oiommu, da);
	if (ret != bytes) {
		dev_err(dev, "entry @ 0x%lx was %zu; not %zu\n",
			da, ret, bytes);
		return -EINVAL;
	}

	return 0;
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = oiommu;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = to_iommu(dev);

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		goto out;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = NULL;

out:
	spin_unlock(&omap_domain->lock);
}

static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}

/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x", *pte);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x", *pgd);
	}

	return ret;
}

static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
};

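/*
 * With omap_iommu_ops registered below, clients drive this driver
 * through the generic IOMMU API. A minimal sketch, assuming the
 * order-based iommu_map() of this kernel generation and an iommu
 * named "isp"; error handling omitted:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc();
 *	struct device *dev = omap_find_iommu_device("isp");
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, da, pa, 0, IOMMU_READ | IOMMU_WRITE);
 */
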
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	register_iommu(&omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");