/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}
#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
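
/*
 * Illustrative note (added; not in the original source): with the layout
 * above, a 32-bit IOVA such as 0x12345678 decomposes into
 * lv1ent_offset() == 0x123 (the 1MiB section index), lv2ent_offset() == 0x45
 * (the 4KiB page index within that section) and spage_offs() == 0x678
 * (the byte offset within the page).
 */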
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_QOS(n) (((n) & 0xF) << 7)
#define CFG_MASK 0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
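
/*
 * Illustrative note (added; not in the original source): MMU_RAW_VER()
 * extracts the 11-bit version field from bits [31:21] of REG_MMU_VERSION;
 * MMU_MAJ_VER()/MMU_MIN_VER() then split that field into a 4-bit major and
 * a 7-bit minor part, so a raw value of 0x183 (MAKE_MMU_VER(3, 3)) denotes
 * System MMU v3.3.
 */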
#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

#define has_sysmmu(dev) ((dev)->archdata.iommu != NULL)
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
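
/*
 * Descriptive note (added): zero_lv2_table is a single, shared, all-fault
 * second-level table. Every unused first-level entry points at it via
 * ZERO_LV2LINK instead of staying a plain fault entry; this keeps the
 * System MMU v3.3 FLPD cache from latching first-level fault entries that
 * may later become valid (see the workaround comments below).
 */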
static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}
enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};
static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};
/* attached to dev.archdata.iommu of the master device */
struct exynos_iommu_owner {
	struct list_head client; /* entry of exynos_iommu_domain.clients */
	struct device *dev;
	struct device *sysmmu;
	struct iommu_domain *domain;
	void *vmm_data;		/* IO virtual memory manager's data */
	spinlock_t lock;	/* Lock to preserve consistency of System MMU */
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for this structure */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};
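
/*
 * Descriptive note (added): the lv1 table holds NUM_LV1ENTRIES (4096)
 * four-byte entries, i.e. 16KiB in total, which is why domain allocation
 * below requests an order-2 (four page) block for pgtable.
 */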
struct sysmmu_drvdata {
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *master;	/* Owner of system MMU */
	void __iomem *sfrbase;
	struct clk *clk;
	struct clk *clk_master;
	int activations;
	spinlock_t lock;
	struct iommu_domain *domain;
	phys_addr_t pgtable;
};
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU is needed to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}
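
/*
 * Descriptive note (added): the three helpers above implement a simple
 * enable/disable reference count. Only the 0 -> 1 transition of
 * set_sysmmu_active() requires programming the hardware, and only the
 * 1 -> 0 transition of set_sysmmu_inactive() allows shutting it down;
 * nested enable/disable pairs in between merely adjust the count.
 */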
static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static unsigned int __raw_sysmmu_version(struct sysmmu_drvdata *data)
{
	return MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
}
static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}
static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}
static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}
static void __sysmmu_set_ptbase(void __iomem *sfrbase, phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}
static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault is not caused by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
			__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}
static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = __raw_sysmmu_version(data);
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
}
static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
static int __sysmmu_enable(struct sysmmu_drvdata *data,
			phys_addr_t pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}
/* __exynos_sysmmu_enable: Enables System MMU
 *
 * returns -error if an error occurred and System MMU is not enabled,
 * 0 if the System MMU has been just enabled and 1 if System MMU was already
 * enabled before.
 */
static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
				  struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	ret = __sysmmu_enable(data, pgtable, domain);
	if (ret >= 0)
		data->master = dev;

	spin_unlock_irqrestore(&owner->lock, flags);

	return ret;
}
int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
{
	BUG_ON(!memblock_is_memory(pgtable));

	return __exynos_sysmmu_enable(dev, pgtable, NULL);
}
static bool exynos_sysmmu_disable(struct device *dev)
{
	unsigned long flags;
	bool disabled = true;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	disabled = __sysmmu_disable(data);
	if (disabled)
		data->master = NULL;

	spin_unlock_irqrestore(&owner->lock, flags);

	return disabled;
}
static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (__raw_sysmmu_version(data) == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
					    sysmmu_iova_t iova)
{
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
					size_t size)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
			iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
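
/*
 * Illustrative note (added): on System MMU v2 the computation above yields
 * num_inv == 16 for one 64KiB large page (size / PAGE_SIZE with 4KiB
 * pages), matching the "16 consecutive sets" rule in the comment, and it
 * caps at 64 because a 1MiB section may be cached in any of the 64 sets.
 */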
void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}
static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
	}
};
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}
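
/*
 * Descriptive note (added): the System MMU walks page tables in main
 * memory, so every PTE update must be cleaned out of both the inner CPU
 * cache (dmac_flush_range) and the outer cache (outer_flush_range) before
 * the hardware can observe it.
 */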
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *exynos_domain;
	int i;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
	if (!exynos_domain)
		return NULL;

	exynos_domain->pgtable =
		(sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!exynos_domain->pgtable)
		goto err_pgtable;

	exynos_domain->lv2entcnt =
		(short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!exynos_domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(exynos_domain->pgtable,
		      exynos_domain->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&exynos_domain->lock);
	spin_lock_init(&exynos_domain->pgtablelock);
	INIT_LIST_HEAD(&exynos_domain->clients);

	exynos_domain->domain.geometry.aperture_start = 0;
	exynos_domain->domain.geometry.aperture_end   = ~0UL;
	exynos_domain->domain.geometry.force_aperture = true;

	return &exynos_domain->domain;

err_counter:
	free_pages((unsigned long)exynos_domain->pgtable, 2);
err_pgtable:
	kfree(exynos_domain);
	return NULL;
}
static void exynos_iommu_domain_free(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	struct exynos_iommu_owner *owner;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		while (!exynos_sysmmu_disable(owner->dev))
			; /* until System MMU is actually disabled */
	}

	while (!list_empty(&priv->clients))
		list_del_init(priv->clients.next);

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(priv);
}
static int exynos_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(dev, pagetable, domain);
	if (ret == 0) {
		list_add_tail(&owner->client, &priv->clients);
		owner->domain = domain;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}
static void exynos_iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner;
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		if (owner == dev->archdata.iommu) {
			if (exynos_sysmmu_disable(dev)) {
				list_del_init(&owner->client);
				owner->domain = NULL;
			}
			break;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (owner == dev->archdata.iommu)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
			__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct exynos_iommu_owner *owner;

			spin_lock(&priv->lock);
			list_for_each_entry(owner, &priv->clients, client)
				sysmmu_tlb_invalidate_flpdcache(
							owner->dev, iova);
			spin_unlock(&priv->lock);
		}
	}

	return page_entry(sent, iova);
}
static int lv1set_section(struct exynos_iommu_domain *priv,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&priv->lock);
	if (lv1ent_page_zero(sent)) {
		struct exynos_iommu_owner *owner;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(owner, &priv->clients, client)
			sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
	}
	spin_unlock(&priv->lock);

	return 0;
}
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic that improves address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if the cached fault entry is hit, even
 * though the fault entry has been updated to a valid entry after it was
 * cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should apply the workaround
 * described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs the following restrictions (see the illustrative sketch below):
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
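
/*
 * Illustrative sketch (added; not part of the original driver): an IOVA
 * allocator honouring the v3.3 rules above could round every region start
 * up to 128KiB and leave at least a 128KiB guard hole behind it. The helper
 * name and parameters are hypothetical.
 */
static inline unsigned long exynos_iova_next_base_example(unsigned long base,
							  size_t size)
{
	/* 128KiB-aligned start address for this region */
	unsigned long start = ALIGN(base, 128 * 1024);

	/* next usable base: region end rounded up, plus a 128KiB guard hole */
	return ALIGN(start + size, 128 * 1024) + 128 * 1024;
}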
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(priv, entry, iova, paddr,
				     &priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(priv, entry, iova,
				      &priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
				       &priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}
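
/*
 * Hedged usage sketch (added; not part of the original driver): clients map
 * through the generic IOMMU API. iommu_map() splits a request according to
 * pgsize_bitmap, so the 1MiB-aligned, 1MiB-sized request below reaches
 * lv1set_section(), while smaller chunks go through lv2set_page().
 */
static inline int exynos_iommu_map_example(struct iommu_domain *dom,
					   unsigned long iova,
					   phys_addr_t paddr)
{
	return iommu_map(dom, iova, paddr, SECT_SIZE,
			 IOMMU_READ | IOMMU_WRITE);
}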
static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
					      sysmmu_iova_t iova, size_t size)
{
	struct exynos_iommu_owner *owner;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client)
		sysmmu_tlb_invalidate_entry(owner->dev, iova, size);

	spin_unlock_irqrestore(&priv->lock, flags);
}
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(priv, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}
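
/*
 * Illustrative note (added): with a 64KiB large page mapped at IOVA
 * 0x10000000, iova_to_phys(0x10003210) walks lv1 -> lv2, matches
 * lv2ent_large() and returns lpage_phys(entry) + 0x3210.
 */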
static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
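
/*
 * Descriptive note (added): pgsize_bitmap evaluates to 0x111000,
 * advertising exactly the three hardware page sizes (4KiB, 64KiB and
 * 1MiB); the IOMMU core uses it to split arbitrary map/unmap requests
 * into supported chunks.
 */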
static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
			__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);