/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *	Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include <asm/cacheflush.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"
#define to_iommu(dev)	\
	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
	struct iommu_domain domain;
};
#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
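
/*
 * Worked example (illustrative, not part of the original source): for a
 * raw MMU_LOCK value of 0x1c50, MMU_LOCK_BASE() extracts bits [14:10]
 * (0x1c50 >> 10 = 7) and MMU_LOCK_VICT() extracts bits [8:4]
 * ((0x1c50 >> 4) & 0x1f = 5), i.e. base = 7, vict = 5.
 */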
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom:	generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}
/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	u32 *p = obj->ctx;
	int i;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		p[i] = iommu_read_reg(obj, i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	u32 *p = obj->ctx;
	int i;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		iommu_write_reg(obj, p[i], i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
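
/*
 * Usage sketch (illustrative): a client driver supporting off-mode would
 * bracket the power transition with these helpers, calling
 * omap_iommu_save_ctx(dev) before power is cut and
 * omap_iommu_restore_ctx(dev) once it returns, so that the MMU register
 * file cached in obj->ctx survives the context loss.
 */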
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}
static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}
static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
static int iommu_enable(struct omap_iommu *obj)
{
	int err;
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);

	if (pdata && pdata->deassert_reset) {
		err = pdata->deassert_reset(pdev, pdata->reset_name);
		if (err) {
			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
			return err;
		}
	}

	pm_runtime_get_sync(obj->dev);

	err = omap2_iommu_enable(obj);

	return err;
}
static void iommu_disable(struct omap_iommu *obj)
{
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);

	omap2_iommu_disable(obj);

	pm_runtime_put_sync(obj->dev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);
}
/*
 *	TLB operations
 */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
		   (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}
void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}
void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}
/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}
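
/*
 * Illustrative caller (the real for_each_iotlb_cr() macro lives in
 * omap-iommu.h): TLB walkers such as flush_iotlb_page() iterate with
 * for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr), which points the
 * victim register at each index i in turn and reads back the CAM/RAM
 * pair via __iotlb_read_cr().
 */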
#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}
#else	/* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif	/* !PREFETCH_IOTLB */
static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}
/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	if (iopte)
		kmem_cache_free(iopte_cachep, iopte);
}
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table has already existed */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
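
/*
 * Example (illustrative): an entry with e->pgsz == MMU_CAM_PGSZ_1M
 * dispatches to iopgd_alloc_section() and writes a single first-level
 * descriptor, while MMU_CAM_PGSZ_16M replicates its supersection
 * descriptor across 16 consecutive pgd slots, mirroring the ARM
 * short-descriptor format this pagetable follows.
 */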
/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
out:
	return bytes;
}
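
/*
 * Example (illustrative): clearing a da that falls inside a 64KB large
 * page rewinds to the first of its 16 replicated ptes (da & IOLARGE_MASK)
 * and wipes all of them, so the function reports the full 64KB; a plain
 * 4KB small page clears a single pte and reports 4KB.
 */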
/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->iommu_dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}
static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}
/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_enable:
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	iommu_disable(obj);
	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *of = pdev->dev.of_node;

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	if (of) {
		obj->name = dev_name(&pdev->dev);
		obj->nr_tlb_entries = 32;
		err = of_property_read_u32(of, "ti,#tlb-entries",
					   &obj->nr_tlb_entries);
		if (err && err != -EINVAL)
			return err;
		if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
			return -EINVAL;
		if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
			obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
	} else {
		obj->nr_tlb_entries = pdata->nr_tlb_entries;
		obj->name = pdata->name;
	}

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	pm_runtime_irq_safe(obj->dev);
	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;
}
static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	iopgtable_clear_entry_all(obj);
	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}
static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu" },
	{},
};
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= MMU_CAM_V;
	e->pgsz		= pgsz;
	e->endian	= MMU_RAM_ENDIAN_LITTLE;
	e->elsz		= MMU_RAM_ELSZ_8;
	e->mixed	= 0;

	return iopgsz_to_bytes(e->pgsz);
}
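
/*
 * Example (illustrative, addresses assumed): iotlb_init_entry(&e,
 * 0x20000000, 0x80000000, MMU_CAM_PGSZ_1M) fills e with a valid,
 * little-endian, 8-bit element entry and returns SZ_1M, the size
 * iopgsz_to_bytes() reports for a section.
 */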
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}
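
/*
 * Usage sketch (illustrative, not from this file): clients reach this
 * callback through the generic IOMMU API via omap_iommu_ops:
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, da, pa, SZ_1M, IOMMU_READ | IOMMU_WRITE);
 *
 * bytes must be one of the sizes in OMAP_IOMMU_PGSIZES; anything else is
 * rejected by bytes_to_iopgsz().
 */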
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	if (!arch_data || !arch_data->name) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
	oiommu->domain = NULL;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}
static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
	struct omap_iommu_domain *omap_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		goto out;

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable)
		goto fail_nomem;

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;

fail_nomem:
	kfree(omap_domain);
out:
	return NULL;
}
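
/*
 * Note on the BUG_ON above (editorial): the OMAP MMU requires the 16KB
 * first-level table to be 16KB-aligned, since MMU_TTB ignores the low
 * address bits. A kzalloc() of the power-of-two IOPGD_TABLE_SIZE is
 * expected to return a naturally aligned block, so the check should
 * never fire in practice.
 */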
static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * An iommu device is still attached
	 * (currently, only one device can be attached) ?
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}
static int omap_iommu_add_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data;
	struct device_node *np;
	struct platform_device *pdev;

	/*
	 * Allocate the archdata iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return 0;

	np = of_parse_phandle(dev->of_node, "iommus", 0);
	if (!np)
		return 0;

	pdev = of_find_device_by_node(np);
	if (WARN_ON(!pdev)) {
		of_node_put(np);
		return -EINVAL;
	}

	arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data) {
		of_node_put(np);
		return -ENOMEM;
	}

	arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL);
	dev->archdata.iommu = arch_data;

	of_node_put(np);

	return 0;
}
static void omap_iommu_remove_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	if (!dev->of_node || !arch_data)
		return;

	kfree(arch_data->name);
	kfree(arch_data);
}
static const struct iommu_ops omap_iommu_ops = {
	.domain_alloc	= omap_iommu_domain_alloc,
	.domain_free	= omap_iommu_domain_free,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.add_device	= omap_iommu_add_device,
	.remove_device	= omap_iommu_remove_device,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	omap_iommu_debugfs_init();

	return platform_driver_register(&omap_iommu_driver);
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */