/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include "iopgtable.h"
#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)
/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct iommu *iommu_dev;
	spinlock_t lock;
};
/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
 * install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int install_iommu_arch(const struct iommu_functions *ops)

EXPORT_SYMBOL_GPL(install_iommu_arch);
/**
 * uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(uninstall_iommu_arch);
/**
 * iommu_save_ctx - Save registers for pm off-mode support
 **/
void iommu_save_ctx(struct iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_save_ctx);
/**
 * iommu_restore_ctx - Restore registers for pm off-mode support
 **/
void iommu_restore_ctx(struct iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_restore_ctx);
/**
 * iommu_arch_version - Return running iommu arch version
 **/
u32 iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(iommu_arch_version);
static int iommu_enable(struct iommu *obj)
{
	int err;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);

	return err;
}
static void iommu_disable(struct iommu *obj)
{
	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}
void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_e);
static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	return arch_iommu->cr_valid(cr);
}
static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
					     struct iotlb_entry *e)
{
	return arch_iommu->alloc_cr(obj, e);
}
u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}
static u32 iommu_report_fault(struct iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}
static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}
static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}
static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}
static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}
/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	return arch_iommu->dump_cr(obj, cr, buf);
}
/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @e:		an iommu tlb entry info
 **/
int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	int i;
	struct iotlb_lock l;
	struct cr_regs *cr, tmp;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
		if (!iotlb_cr_valid(&tmp))
			break;

	if (i == obj->nr_tlb_entries) {
		dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
		err = -EBUSY;
		goto out;
	}

	iotlb_lock_get(obj, &l);
	iotlb_lock_set(obj, &l);

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);

	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}
EXPORT_SYMBOL_GPL(load_iotlb_entry);
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
void flush_iotlb_page(struct iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
EXPORT_SYMBOL_GPL(flush_iotlb_page);
/**
 * flush_iotlb_range - Clear iommu tlb entries
 * @start:	iommu device virtual address (start)
 * @end:	iommu device virtual address (end)
 *
 * Clear the iommu tlb entries covering the given device address range.
 **/
void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
{
	u32 da = start;

	while (da < end) {
		flush_iotlb_page(obj, da);
		/* FIXME: Optimize for multiple page size */
		da += IOPTE_SIZE;
	}
}
EXPORT_SYMBOL_GPL(flush_iotlb_range);
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 **/
void flush_iotlb_all(struct iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(flush_iotlb_all);
/**
 * iommu_set_twl - enable/disable table walking logic
 * @on:		enable/disable
 *
 * Enable or disable the table walking logic (TWL). To work exclusively
 * with locked TLB entries and receive notifications on TLB misses,
 * disable TWL with this function.
 */
void iommu_set_twl(struct iommu *obj, bool on)
{
	clk_enable(obj->clk);
	arch_iommu->set_twl(obj, on);
	clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(iommu_set_twl);
#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
{
	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);
static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}
/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @buf:	output buffer
 **/
size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(dump_tlb_entries);
int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}
static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a page table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}
static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}
static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}
static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}
static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct iommu *, u32, u32, u32);
	u32 prot;
	int err;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		BUG();
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
/**
 * iopgtable_store_entry - Make an iommu pte entry
 * @e:		an iommu tlb entry info
 **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
	if (!err)
		load_iotlb_entry(obj, e);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(iopgtable_store_entry);
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);
static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
out:
	return bytes;
}
/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @da:		iommu device virtual address
 **/
size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(iopgtable_clear_entry);
static void iopgtable_clear_entry_all(struct iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct iommu *obj = data;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);

	/* Fault callback or TLB/PTE Dynamic loading */
	if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
		return IRQ_HANDLED;

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
			"*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
		iopte, *iopte);

	return IRQ_NONE;
}
static int device_match_by_alias(struct device *dev, void *data)
{
	struct iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}
/**
 * iommu_set_da_range - Set a valid device address range
 * @start:	Start of valid range
 * @end:	End of valid range
 **/
int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
{
	if (end < start || !PAGE_ALIGN(start | end))
		return -EINVAL;

	obj->da_start = start;
	obj->da_end = end;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_set_da_range);
/**
 * omap_find_iommu_device() - find an omap iommu device by name
 * @name:	name of the iommu device
 *
 * The generic iommu API requires the caller to provide the device
 * it wishes to attach to a certain iommu domain.
 *
 * Drivers generally should not bother with this as it should just
 * be taken care of by the DMA-API using dev_archdata.
 *
 * This function is provided as an interim solution until the latter
 * materializes, and omap3isp is fully migrated to the DMA-API.
 */
struct device *omap_find_iommu_device(const char *name)
{
	return driver_find_device(&omap_iommu_driver.driver, NULL,
				  (void *)name,
				  device_match_by_alias);
}
EXPORT_SYMBOL_GPL(omap_find_iommu_device);
/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @dev:	target omap iommu device
 * @iopgd:	page table
 **/
static struct iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
{
	int err = -ENOMEM;
	struct iommu *obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner))
		goto err_module;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);

err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
/**
 * omap_iommu_detach - release iommu device
 **/
static void omap_iommu_detach(struct iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
int iommu_set_isr(const char *name,
		  int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
			     void *priv),
		  void *isr_priv)
{
	struct device *dev;
	struct iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return -ENODEV;

	obj = to_iommu(dev);
	mutex_lock(&obj->iommu_lock);
	if (obj->refcount != 0) {
		mutex_unlock(&obj->iommu_lock);
		return -EBUSY;
	}
	obj->isr = isr;
	obj->isr_priv = isr_priv;
	mutex_unlock(&obj->iommu_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_set_isr);
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}
static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, int order, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret, flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %d\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = iopgtable_store_entry(oiommu, &e);
	if (ret) {
		dev_err(dev, "iopgtable_store_entry failed: %d\n", ret);
		return -EINVAL;
	}

	return 0;
}
static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			    int order)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	size_t ret;

	dev_dbg(dev, "unmapping da 0x%lx size 0x%x\n", da, bytes);

	ret = iopgtable_clear_entry(oiommu, da);
	if (ret != bytes) {
		dev_err(dev, "entry @ 0x%lx was %d; not %d\n", da, ret, bytes);
		return -EINVAL;
	}

	return 0;
}
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = oiommu;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = to_iommu(dev);

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		goto out;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = NULL;

out:
	spin_unlock(&omap_domain->lock);
}
static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}
/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x", *pte);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x", *pgd);
	}

	return ret;
}
static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}
static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
};
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	register_iommu(&omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);
static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);
MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");