/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *            Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include "iopgtable.h"

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * install_iommu_arch - Install architecture specific iommu functions
 * @ops:        a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithm (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int install_iommu_arch(const struct iommu_functions *ops)
{
        if (arch_iommu)
                return -EBUSY;

        arch_iommu = ops;
        return 0;
}
EXPORT_SYMBOL_GPL(install_iommu_arch);

/**
 * uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:        a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void uninstall_iommu_arch(const struct iommu_functions *ops)
{
        if (arch_iommu != ops)
                pr_err("%s: not your arch\n", __func__);

        arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(uninstall_iommu_arch);

/**
 * iommu_save_ctx - Save registers for pm off-mode support
 * @obj:        target iommu
 **/
void iommu_save_ctx(struct iommu *obj)
{
        arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_save_ctx);

/**
 * iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj:        target iommu
 **/
void iommu_restore_ctx(struct iommu *obj)
{
        arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_restore_ctx);

/**
 * iommu_arch_version - Return running iommu arch version
 **/
u32 iommu_arch_version(void)
{
        return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(iommu_arch_version);

static int iommu_enable(struct iommu *obj)
{
        int err;

        if (!obj)
                return -EINVAL;

        clk_enable(obj->clk);

        err = arch_iommu->enable(obj);

        clk_disable(obj->clk);
        return err;
}

static void iommu_disable(struct iommu *obj)
{
        if (!obj)
                return;

        clk_enable(obj->clk);

        arch_iommu->disable(obj);

        clk_disable(obj->clk);
}

void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
        arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
        return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
                                             struct iotlb_entry *e)
{
        return arch_iommu->alloc_cr(obj, e);
}

u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
        return arch_iommu->cr_to_virt(cr);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);

static u32 get_iopte_attr(struct iotlb_entry *e)
{
        return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct iommu *obj, u32 *da)
{
        return arch_iommu->fault_isr(obj, da);
}

static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
        u32 val;

        val = iommu_read_reg(obj, MMU_LOCK);

        l->base = MMU_LOCK_BASE(val);
        l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
        u32 val;

        val = (l->base << MMU_LOCK_BASE_SHIFT);
        val |= (l->vict << MMU_LOCK_VICT_SHIFT);

        iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
        arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
        arch_iommu->tlb_load_cr(obj, cr);

        iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
        iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:        target iommu
 * @cr:         contents of cam and ram register
 * @buf:        output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
                                    char *buf)
{
        return arch_iommu->dump_cr(obj, cr, buf);
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:        target iommu
 * @e:          an iommu tlb entry info
 **/
int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
        int i;
        int err = 0;
        struct iotlb_lock l;
        struct cr_regs *cr;

        if (!obj || !obj->nr_tlb_entries || !e)
                return -EINVAL;

        clk_enable(obj->clk);

        iotlb_lock_get(obj, &l);
        if (l.base == obj->nr_tlb_entries) {
                dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
                err = -EBUSY;
                goto out;
        }

        /* find a free (invalid) victim entry, just above the preserved ones */
        for (i = l.base; i < obj->nr_tlb_entries; i++) {
                struct cr_regs tmp;

                iotlb_lock_get(obj, &l);
                l.vict = i;
                iotlb_lock_set(obj, &l);
                iotlb_read_cr(obj, &tmp);
                if (!iotlb_cr_valid(&tmp))
                        break;
        }
        if (i == obj->nr_tlb_entries) {
                dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
                err = -EBUSY;
                goto out;
        }
        iotlb_lock_set(obj, &l);

        cr = iotlb_alloc_cr(obj, e);
        if (IS_ERR(cr)) {
                clk_disable(obj->clk);
                return PTR_ERR(cr);
        }

        iotlb_load_cr(obj, cr);
        kfree(cr);

        /* increment victim for next tlb load */
        if (++l.vict == obj->nr_tlb_entries)
                l.vict = l.base;
        iotlb_lock_set(obj, &l);
out:
        clk_disable(obj->clk);
        return err;
}
EXPORT_SYMBOL_GPL(load_iotlb_entry);

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:        target iommu
 * @da:         iommu device virtual address
 *
 * Clear the iommu tlb entry which includes the 'da' address.
 **/
void flush_iotlb_page(struct iommu *obj, u32 da)
{
        struct iotlb_lock l;
        int i;

        clk_enable(obj->clk);

        for (i = 0; i < obj->nr_tlb_entries; i++) {
                struct cr_regs cr;
                u32 start;
                size_t bytes;

                iotlb_lock_get(obj, &l);
                l.vict = i;
                iotlb_lock_set(obj, &l);
                iotlb_read_cr(obj, &cr);
                if (!iotlb_cr_valid(&cr))
                        continue;

                start = iotlb_cr_to_virt(&cr);
                bytes = iopgsz_to_bytes(cr.cam & 3);

                if ((start <= da) && (da < start + bytes)) {
                        dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
                                __func__, start, da, bytes);
                        iotlb_load_cr(obj, &cr);
                        iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
                }
        }
        clk_disable(obj->clk);

        if (i == obj->nr_tlb_entries)
                dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
EXPORT_SYMBOL_GPL(flush_iotlb_page);

/**
 * flush_iotlb_range - Clear iommu tlb entries in a range
 * @obj:        target iommu
 * @start:      iommu device virtual address (start)
 * @end:        iommu device virtual address (end)
 *
 * Clear every iommu tlb entry that includes an address in [start, end).
 **/
void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
{
        u32 da = start;

        while (da < end) {
                flush_iotlb_page(obj, da);
                /* FIXME: Optimize for multiple page size */
                da += IOPTE_SIZE;
        }
}
EXPORT_SYMBOL_GPL(flush_iotlb_range);

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:        target iommu
 **/
void flush_iotlb_all(struct iommu *obj)
{
        struct iotlb_lock l;

        clk_enable(obj->clk);

        l.base = 0;
        l.vict = 0;
        iotlb_lock_set(obj, &l);

        iommu_write_reg(obj, 1, MMU_GFLUSH);

        clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(flush_iotlb_all);

#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
{
        if (!obj || !buf)
                return -EINVAL;

        clk_enable(obj->clk);

        bytes = arch_iommu->dump_ctx(obj, buf, bytes);

        clk_disable(obj->clk);

        return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);

static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
{
        int i;
        struct iotlb_lock saved, l;
        struct cr_regs *p = crs;

        clk_enable(obj->clk);

        iotlb_lock_get(obj, &saved);
        memcpy(&l, &saved, sizeof(saved));

        for (i = 0; i < num; i++) {
                struct cr_regs tmp;

                iotlb_lock_get(obj, &l);
                l.vict = i;
                iotlb_lock_set(obj, &l);
                iotlb_read_cr(obj, &tmp);
                if (!iotlb_cr_valid(&tmp))
                        continue;

                *p++ = tmp;
        }
        iotlb_lock_set(obj, &saved);
        clk_disable(obj->clk);

        return p - crs;
}

/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @obj:        target iommu
 * @buf:        output buffer
 **/
size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
{
        int i, num;
        struct cr_regs *cr;
        char *p = buf;

        num = bytes / sizeof(*cr);
        num = min(obj->nr_tlb_entries, num);

        cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
        if (!cr)
                return 0;

        num = __dump_tlb_entries(obj, cr, num);
        for (i = 0; i < num; i++)
                p += iotlb_dump_cr(obj, cr + i, p);
        kfree(cr);

        return p - buf;
}
EXPORT_SYMBOL_GPL(dump_tlb_entries);

int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
        return driver_for_each_device(&omap_iommu_driver.driver,
                                      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 *      H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
        /* FIXME: L2 cache should be taken care of if it exists */
        do {
                asm("mcr        p15, 0, %0, c7, c10, 1 @ flush_pgd"
                    : : "r" (first));
                first += L1_CACHE_BYTES / sizeof(*first);
        } while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
        /* FIXME: L2 cache should be taken care of if it exists */
        do {
                asm("mcr        p15, 0, %0, c7, c10, 1 @ flush_pte"
                    : : "r" (first));
                first += L1_CACHE_BYTES / sizeof(*first);
        } while (first <= last);
}

static void iopte_free(u32 *iopte)
{
        /* Note: freed iopte's must be clean ready for re-use */
        kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
{
        u32 *iopte;

        /* a table has already been allocated */
        if (*iopgd)
                goto pte_ready;

        /*
         * do the allocation outside the page table lock
         */
        spin_unlock(&obj->page_table_lock);
        iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
        spin_lock(&obj->page_table_lock);

        if (!*iopgd) {
                if (!iopte)
                        return ERR_PTR(-ENOMEM);

                *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
                flush_iopgd_range(iopgd, iopgd);

                dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
        } else {
                /* We raced, free the redundant table */
                iopte_free(iopte);
        }

pte_ready:
        iopte = iopte_offset(iopgd, da);

        dev_vdbg(obj->dev,
                 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
                 __func__, da, iopgd, *iopgd, iopte, *iopte);

        return iopte;
}

static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
        u32 *iopgd = iopgd_offset(obj, da);

        *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
        flush_iopgd_range(iopgd, iopgd);
        return 0;
}

static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
        u32 *iopgd = iopgd_offset(obj, da);
        int i;

        for (i = 0; i < 16; i++)
                *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
        flush_iopgd_range(iopgd, iopgd + 15);
        return 0;
}

static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
        u32 *iopgd = iopgd_offset(obj, da);
        u32 *iopte = iopte_alloc(obj, iopgd, da);

        if (IS_ERR(iopte))
                return PTR_ERR(iopte);

        *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
        flush_iopte_range(iopte, iopte);

        dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
                 __func__, da, pa, iopte, *iopte);

        return 0;
}

static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
        u32 *iopgd = iopgd_offset(obj, da);
        u32 *iopte = iopte_alloc(obj, iopgd, da);
        int i;

        if (IS_ERR(iopte))
                return PTR_ERR(iopte);

        for (i = 0; i < 16; i++)
                *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
        flush_iopte_range(iopte, iopte + 15);
        return 0;
}

static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
{
        int (*fn)(struct iommu *, u32, u32, u32);
        u32 prot;
        int err;

        if (!obj || !e)
                return -EINVAL;

        switch (e->pgsz) {
        case MMU_CAM_PGSZ_16M:
                fn = iopgd_alloc_super;
                break;
        case MMU_CAM_PGSZ_1M:
                fn = iopgd_alloc_section;
                break;
        case MMU_CAM_PGSZ_64K:
                fn = iopte_alloc_large;
                break;
        case MMU_CAM_PGSZ_4K:
                fn = iopte_alloc_page;
                break;
        default:
                fn = NULL;
                BUG();
                break;
        }

        prot = get_iopte_attr(e);

        spin_lock(&obj->page_table_lock);
        err = fn(obj, e->da, e->pa, prot);
        spin_unlock(&obj->page_table_lock);

        return err;
}

/**
 * iopgtable_store_entry - Make an iommu pte entry
 * @obj:        target iommu
 * @e:          an iommu tlb entry info
 **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
        int err;

        flush_iotlb_page(obj, e->da);
        err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
        if (!err)
                load_iotlb_entry(obj, e);
#endif
        return err;
}
EXPORT_SYMBOL_GPL(iopgtable_store_entry);

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:        target iommu
 * @da:         iommu device virtual address
 * @ppgd:       iommu pgd entry pointer to be returned
 * @ppte:       iommu pte entry pointer to be returned
 **/
void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
        u32 *iopgd, *iopte = NULL;

        iopgd = iopgd_offset(obj, da);
        if (!*iopgd)
                goto out;

        if (*iopgd & IOPGD_TABLE)
                iopte = iopte_offset(iopgd, da);
out:
        *ppgd = iopgd;
        *ppte = iopte;
}
EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);

static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
        size_t bytes;
        u32 *iopgd = iopgd_offset(obj, da);
        int nent = 1;

        if (!*iopgd)
                return 0;

        if (*iopgd & IOPGD_TABLE) {
                int i;
                u32 *iopte = iopte_offset(iopgd, da);

                bytes = IOPTE_SIZE;
                if (*iopte & IOPTE_LARGE) {
                        nent *= 16;
                        /* rewind to the 1st entry */
                        iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
                }
                bytes *= nent;
                memset(iopte, 0, nent * sizeof(*iopte));
                flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));

                /*
                 * do table walk to check if this table is necessary or not
                 */
                iopte = iopte_offset(iopgd, 0);
                for (i = 0; i < PTRS_PER_IOPTE; i++)
                        if (iopte[i])
                                goto out;

                iopte_free(iopte);
                nent = 1; /* for the next L1 entry */
        } else {
                bytes = IOPGD_SIZE;
                if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
                        nent *= 16;
                        /* rewind to the 1st entry */
                        iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
                }
                bytes *= nent;
        }
        memset(iopgd, 0, nent * sizeof(*iopgd));
        flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
out:
        return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:        target iommu
 * @da:         iommu device virtual address
 **/
size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
        size_t bytes;

        spin_lock(&obj->page_table_lock);

        bytes = iopgtable_clear_entry_core(obj, da);
        flush_iotlb_page(obj, da);

        spin_unlock(&obj->page_table_lock);

        return bytes;
}
EXPORT_SYMBOL_GPL(iopgtable_clear_entry);

static void iopgtable_clear_entry_all(struct iommu *obj)
{
        int i;

        spin_lock(&obj->page_table_lock);

        for (i = 0; i < PTRS_PER_IOPGD; i++) {
                u32 da;
                u32 *iopgd;

                da = i << IOPGD_SHIFT;
                iopgd = iopgd_offset(obj, da);

                if (!*iopgd)
                        continue;

                if (*iopgd & IOPGD_TABLE)
                        iopte_free(iopte_offset(iopgd, 0));

                *iopgd = 0;
                flush_iopgd_range(iopgd, iopgd);
        }

        flush_iotlb_all(obj);

        spin_unlock(&obj->page_table_lock);
}

/*
 *      Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
        u32 stat, da;
        u32 *iopgd, *iopte;
        int err = -EIO;
        struct iommu *obj = data;

        if (!obj->refcount)
                return IRQ_NONE;

        /* Dynamic loading TLB or PTE */
        if (obj->isr)
                err = obj->isr(obj);

        if (!err)
                return IRQ_HANDLED;

        clk_enable(obj->clk);
        stat = iommu_report_fault(obj, &da);
        clk_disable(obj->clk);
        if (!stat)
                return IRQ_HANDLED;

        iopgd = iopgd_offset(obj, da);

        if (!(*iopgd & IOPGD_TABLE)) {
                dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n", __func__,
                        da, iopgd, *iopgd);
                return IRQ_NONE;
        }

        iopte = iopte_offset(iopgd, da);

        dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
                __func__, da, iopgd, *iopgd, iopte, *iopte);

        return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
        struct iommu *obj = to_iommu(dev);
        const char *name = data;

        pr_debug("%s: %s %s\n", __func__, obj->name, name);

        return strcmp(obj->name, name) == 0;
}

/**
 * iommu_get - Get iommu handler
 * @name:       target iommu name
 **/
struct iommu *iommu_get(const char *name)
{
        int err = -ENOMEM;
        struct device *dev;
        struct iommu *obj;

        dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
                                 device_match_by_alias);
        if (!dev)
                return ERR_PTR(-ENODEV);

        obj = to_iommu(dev);

        mutex_lock(&obj->iommu_lock);

        if (obj->refcount++ == 0) {
                err = iommu_enable(obj);
                if (err)
                        goto err_enable;
                flush_iotlb_all(obj);
        }

        if (!try_module_get(obj->owner))
                goto err_module;

        mutex_unlock(&obj->iommu_lock);

        dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
        return obj;

err_module:
        if (obj->refcount == 1)
                iommu_disable(obj);
err_enable:
        obj->refcount--;
        mutex_unlock(&obj->iommu_lock);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iommu_get);

/**
 * iommu_put - Put back iommu handler
 * @obj:        target iommu
 **/
void iommu_put(struct iommu *obj)
{
        if (!obj || IS_ERR(obj))
                return;

        mutex_lock(&obj->iommu_lock);

        if (--obj->refcount == 0)
                iommu_disable(obj);

        module_put(obj->owner);

        mutex_unlock(&obj->iommu_lock);

        dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
EXPORT_SYMBOL_GPL(iommu_put);

/*
 *      OMAP Device MMU (IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
        int err = -ENODEV;
        void *p;
        int irq;
        struct iommu *obj;
        struct resource *res;
        struct iommu_platform_data *pdata = pdev->dev.platform_data;

        if (pdev->num_resources != 2)
                return -EINVAL;

        obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        obj->clk = clk_get(&pdev->dev, pdata->clk_name);
        if (IS_ERR(obj->clk))
                goto err_clk;

        obj->nr_tlb_entries = pdata->nr_tlb_entries;
        obj->name = pdata->name;
        obj->dev = &pdev->dev;
        obj->ctx = (void *)obj + sizeof(*obj);

        mutex_init(&obj->iommu_lock);
        mutex_init(&obj->mmap_lock);
        spin_lock_init(&obj->page_table_lock);
        INIT_LIST_HEAD(&obj->mmap);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                err = -ENODEV;
                goto err_mem;
        }
        obj->regbase = ioremap(res->start, resource_size(res));
        if (!obj->regbase) {
                err = -ENOMEM;
                goto err_mem;
        }

        res = request_mem_region(res->start, resource_size(res),
                                 dev_name(&pdev->dev));
        if (!res) {
                err = -EIO;
                goto err_mem;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                err = -ENODEV;
                goto err_irq;
        }
        err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
                          dev_name(&pdev->dev), obj);
        if (err < 0)
                goto err_irq;
        platform_set_drvdata(pdev, obj);

        p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
        if (!p) {
                err = -ENOMEM;
                goto err_pgd;
        }
        memset(p, 0, IOPGD_TABLE_SIZE);
        clean_dcache_area(p, IOPGD_TABLE_SIZE);
        obj->iopgd = p;

        BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));

        dev_info(&pdev->dev, "%s registered\n", obj->name);
        return 0;

err_pgd:
        free_irq(irq, obj);
err_irq:
        release_mem_region(res->start, resource_size(res));
        iounmap(obj->regbase);
err_mem:
        clk_put(obj->clk);
err_clk:
        kfree(obj);
        return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
        int irq;
        struct resource *res;
        struct iommu *obj = platform_get_drvdata(pdev);

        platform_set_drvdata(pdev, NULL);

        iopgtable_clear_entry_all(obj);
        free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));

        irq = platform_get_irq(pdev, 0);
        free_irq(irq, obj);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));
        iounmap(obj->regbase);

        clk_put(obj->clk);
        dev_info(&pdev->dev, "%s removed\n", obj->name);
        kfree(obj);
        return 0;
}

static struct platform_driver omap_iommu_driver = {
        .probe  = omap_iommu_probe,
        .remove = __devexit_p(omap_iommu_remove),
        .driver = {
                .name   = "omap-iommu",
        },
};

static void iopte_cachep_ctor(void *iopte)
{
        clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int __init omap_iommu_init(void)
{
        struct kmem_cache *p;
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        size_t align = 1 << 10; /* L2 pagetable alignment */

        p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
                              iopte_cachep_ctor);
        if (!p)
                return -ENOMEM;
        iopte_cachep = p;

        return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
        kmem_cache_destroy(iopte_cachep);

        platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");