/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
                           ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
                          ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
        return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
        return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
        return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE     0x5
#define CTRL_BLOCK      0x7
#define CTRL_DISABLE    0x0

#define CFG_LRU         0x1
#define CFG_QOS(n)      ((n & 0xF) << 7)
#define CFG_MASK        0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN       (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL      (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE   (1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL            0x000
#define REG_MMU_CFG             0x004
#define REG_MMU_STATUS          0x008
#define REG_MMU_FLUSH           0x00C
#define REG_MMU_FLUSH_ENTRY     0x010
#define REG_PT_BASE_ADDR        0x014
#define REG_INT_STATUS          0x018
#define REG_INT_CLEAR           0x01C

#define REG_PAGE_FAULT_ADDR     0x024
#define REG_AW_FAULT_ADDR       0x028
#define REG_AR_FAULT_ADDR       0x02C
#define REG_DEFAULT_SLAVE_ADDR  0x030

#define REG_MMU_VERSION         0x034

#define MMU_MAJ_VER(val)        ((val) >> 7)
#define MMU_MIN_VER(val)        ((val) & 0x7F)
#define MMU_RAW_VER(reg)        (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)  ((((maj) & 0xF) << 7) | ((min) & 0x7F))
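
/*
 * Illustrative arithmetic only: MAKE_MMU_VER(3, 3) = (3 << 7) | 3 = 0x183,
 * the value data->version is compared against for the System MMU v3.3 quirks.
 */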

#define REG_PB0_SADDR           0x04C
#define REG_PB0_EADDR           0x050
#define REG_PB1_SADDR           0x054
#define REG_PB1_EADDR           0x058

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
        return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
        return (sysmmu_pte_t *)phys_to_virt(
                                lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
        SYSMMU_PAGEFAULT,
        /* ... */
        SYSMMU_AW_PROTECTION, /* 7 */
        SYSMMU_FAULT_UNKNOWN,
        SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
        /* ... */
        REG_DEFAULT_SLAVE_ADDR,
        /* ... */
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        /* ... */
        "AR MULTI-HIT FAULT",
        "AW MULTI-HIT FAULT",
        /* ... */
        "AR SECURITY PROTECTION FAULT",
        "AR ACCESS PROTECTION FAULT",
        "AW SECURITY PROTECTION FAULT",
        "AW ACCESS PROTECTION FAULT",
        /* ... */
};

/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add. It contains a list of SYSMMU controllers defined by device
 * tree, which are bound to the given master device. It is usually referenced
 * by the 'owner' pointer.
 */
struct exynos_iommu_owner {
        struct list_head controllers;   /* list of sysmmu_drvdata.owner_node */
};

/*
 * This structure is an exynos-specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices that have
 * been attached to this domain and the page tables of the IO address space
 * defined by it. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
        struct list_head clients;   /* list of sysmmu_drvdata.domain_node */
        sysmmu_pte_t *pgtable;      /* lv1 page table, 16KB */
        short *lv2entcnt;           /* free lv2 entry counter for each section */
        spinlock_t lock;            /* lock for modifying list of clients */
        spinlock_t pgtablelock;     /* lock for modifying page table @ pgtable */
        struct iommu_domain domain; /* generic domain data structure */
};
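
/*
 * Size arithmetic implied by the constants above: the lv1 table holds
 * NUM_LV1ENTRIES (4096) entries of sizeof(sysmmu_pte_t) (4) bytes, i.e. the
 * 16KB noted for pgtable, and each entry maps one 1MiB section, so a domain
 * covers a 4GiB I/O virtual address space.
 */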

/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from device
 * tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
        struct device *sysmmu;          /* SYSMMU controller device */
        struct device *master;          /* master device (owner) */
        void __iomem *sfrbase;          /* our registers */
        struct clk *clk;                /* SYSMMU's clock */
        struct clk *clk_master;         /* master's device clock */
        int activations;                /* number of calls to sysmmu_enable */
        spinlock_t lock;                /* lock for modifying state */
        struct exynos_iommu_domain *domain; /* domain we belong to */
        struct list_head domain_node;   /* node for domain clients list */
        struct list_head owner_node;    /* node for owner controllers list */
        phys_addr_t pgtable;            /* assigned page table structure */
        unsigned int version;           /* our version */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU was not active previously
           and it needs to be initialized */
        return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU needs to be disabled */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
        return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
        __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
        int i = 120;

        __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
        while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
                --i;

        if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
                sysmmu_unblock(sfrbase);
                return false;
        }

        return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
        __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
                                sysmmu_iova_t iova, unsigned int num_inv)
{
        unsigned int i;

        for (i = 0; i < num_inv; i++) {
                __raw_writel((iova & SPAGE_MASK) | 1,
                                sfrbase + REG_MMU_FLUSH_ENTRY);
                iova += SPAGE_SIZE;
        }
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase, phys_addr_t pgd)
{
        __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

        __sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
                enum exynos_sysmmu_inttype itype,
                phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
        sysmmu_pte_t *ent;

        if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
                itype = SYSMMU_FAULT_UNKNOWN;

        pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
                sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

        ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
        pr_err("\tLv1 entry: %#x\n", *ent);

        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
                pr_err("\t Lv2 entry: %#x\n", *ent);
        }
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
        /* SYSMMU is in blocked state when interrupt occurred. */
        struct sysmmu_drvdata *data = dev_id;
        enum exynos_sysmmu_inttype itype;
        sysmmu_iova_t addr = -1;
        int ret = -ENOSYS;

        WARN_ON(!is_sysmmu_active(data));

        spin_lock(&data->lock);

        if (!IS_ERR(data->clk_master))
                clk_enable(data->clk_master);

        itype = (enum exynos_sysmmu_inttype)
                __ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
        if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
                itype = SYSMMU_FAULT_UNKNOWN;
        else
                addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

        if (itype == SYSMMU_FAULT_UNKNOWN) {
                pr_err("%s: Fault was not caused by System MMU '%s'!\n",
                        __func__, dev_name(data->sysmmu));
                pr_err("%s: Please check if IRQ is correctly configured.\n",
                        __func__);
        } else {
                unsigned int base =
                        __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
                show_fault_information(dev_name(data->sysmmu),
                                        itype, base, addr);
                if (data->domain)
                        ret = report_iommu_fault(&data->domain->domain,
                                        data->master, addr, itype);
        }

        /* fault is not recovered by fault handler */
        BUG_ON(ret != 0);

        __raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

        sysmmu_unblock(data->sfrbase);

        if (!IS_ERR(data->clk_master))
                clk_disable(data->clk_master);

        spin_unlock(&data->lock);

        return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
        if (!IS_ERR(data->clk_master))
                clk_enable(data->clk_master);

        __raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
        __raw_writel(0, data->sfrbase + REG_MMU_CFG);

        clk_disable(data->clk);
        if (!IS_ERR(data->clk_master))
                clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
        bool disabled;
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);

        disabled = set_sysmmu_inactive(data);

        if (disabled) {
                __sysmmu_disable_nocount(data);

                dev_dbg(data->sysmmu, "Disabled\n");
        } else {
                dev_dbg(data->sysmmu, "%d times left to disable\n",
                                        data->activations);
        }

        spin_unlock_irqrestore(&data->lock, flags);

        return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
        unsigned int cfg = CFG_LRU | CFG_QOS(15);
        unsigned int ver;

        ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
        if (MMU_MAJ_VER(ver) == 3) {
                if (MMU_MIN_VER(ver) >= 2) {
                        cfg |= CFG_FLPDCACHE;
                        if (MMU_MIN_VER(ver) == 3) {
                                cfg |= CFG_ACGEN;
                        }
                }
        }

        __raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
        data->version = ver;
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
        if (!IS_ERR(data->clk_master))
                clk_enable(data->clk_master);
        clk_enable(data->clk);

        __raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

        __sysmmu_init_config(data);

        __sysmmu_set_ptbase(data->sfrbase, data->pgtable);

        __raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

        if (!IS_ERR(data->clk_master))
                clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
                           struct exynos_iommu_domain *domain)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        if (set_sysmmu_active(data)) {
                data->pgtable = pgtable;
                data->domain = domain;

                __sysmmu_enable_nocount(data);

                dev_dbg(data->sysmmu, "Enabled\n");
        } else {
                ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

                dev_dbg(data->sysmmu, "already enabled\n");
        }

        if (WARN_ON(ret < 0))
                set_sysmmu_inactive(data); /* decrement count */

        spin_unlock_irqrestore(&data->lock, flags);

        return ret;
}

static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
                                              sysmmu_iova_t iova)
{
        if (data->version == MAKE_MMU_VER(3, 3))
                __raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
                                            sysmmu_iova_t iova)
{
        unsigned long flags;

        if (!IS_ERR(data->clk_master))
                clk_enable(data->clk_master);

        spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data))
                __sysmmu_tlb_invalidate_flpdcache(data, iova);
        spin_unlock_irqrestore(&data->lock, flags);

        if (!IS_ERR(data->clk_master))
                clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
                                        sysmmu_iova_t iova, size_t size)
{
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data)) {
                unsigned int num_inv = 1;

                if (!IS_ERR(data->clk_master))
                        clk_enable(data->clk_master);

                /*
                 * L2TLB invalidation required
                 * 4KB page: 1 invalidation
                 * 64KB page: 16 invalidations
                 * 1MB page: 64 invalidations
                 * because it is set-associative TLB
                 * with 8-way and 64 sets.
                 * 1MB page can be cached in one of all sets.
                 * 64KB page can be one of 16 consecutive sets.
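                 *
                 * Worked example (illustrative): unmapping one 64KiB large
                 * page on System MMU v2 therefore issues
                 * num_inv = 64KiB / 4KiB = 16 FLUSH_ENTRY writes below,
                 * capped at 64 for a full 1MiB section.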
                 */
                if (MMU_MAJ_VER(data->version) == 2)
                        num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

                if (sysmmu_block(data->sfrbase)) {
                        __sysmmu_tlb_invalidate_entry(
                                data->sfrbase, iova, num_inv);
                        sysmmu_unblock(data->sfrbase);
                }
                if (!IS_ERR(data->clk_master))
                        clk_disable(data->clk_master);
        } else {
                dev_dbg(data->master,
                        "disabled. Skipping TLB invalidation @ %#x\n", iova);
        }
        spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
        int irq, ret;
        struct device *dev = &pdev->dev;
        struct sysmmu_drvdata *data;
        struct resource *res;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->sfrbase = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->sfrbase))
                return PTR_ERR(data->sfrbase);

        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                dev_err(dev, "Unable to find IRQ resource\n");
                return irq;
        }

        ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
                                dev_name(dev), data);
        if (ret) {
                dev_err(dev, "Unable to register handler of irq %d\n", irq);
                return ret;
        }

        data->clk = devm_clk_get(dev, "sysmmu");
        if (IS_ERR(data->clk)) {
                dev_err(dev, "Failed to get clock!\n");
                return PTR_ERR(data->clk);
        }

        ret = clk_prepare(data->clk);
        if (ret) {
                dev_err(dev, "Failed to prepare clk\n");
                return ret;
        }

        data->clk_master = devm_clk_get(dev, "master");
        if (!IS_ERR(data->clk_master)) {
                ret = clk_prepare(data->clk_master);
                if (ret) {
                        clk_unprepare(data->clk);
                        dev_err(dev, "Failed to prepare master's clk\n");
                        return ret;
                }
        }

        data->sysmmu = dev;
        spin_lock_init(&data->lock);

        platform_set_drvdata(pdev, data);

        pm_runtime_enable(dev);

        return 0;
}

static const struct of_device_id sysmmu_of_match[] __initconst = {
        { .compatible = "samsung,exynos-sysmmu", },
        { },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
        .probe  = exynos_sysmmu_probe,
        .driver = {
                .name           = "exynos-sysmmu",
                .of_match_table = sysmmu_of_match,
        }
};

static inline void pgtable_flush(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart),
                                virt_to_phys(vaend));
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
        struct exynos_iommu_domain *domain;
        int i;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!domain->pgtable)
                goto err_pgtable;

        domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!domain->lv2entcnt)
                goto err_counter;

        /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
        for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
                domain->pgtable[i + 0] = ZERO_LV2LINK;
                domain->pgtable[i + 1] = ZERO_LV2LINK;
                domain->pgtable[i + 2] = ZERO_LV2LINK;
                domain->pgtable[i + 3] = ZERO_LV2LINK;
                domain->pgtable[i + 4] = ZERO_LV2LINK;
                domain->pgtable[i + 5] = ZERO_LV2LINK;
                domain->pgtable[i + 6] = ZERO_LV2LINK;
                domain->pgtable[i + 7] = ZERO_LV2LINK;
        }

        pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);

        spin_lock_init(&domain->lock);
        spin_lock_init(&domain->pgtablelock);
        INIT_LIST_HEAD(&domain->clients);

        domain->domain.geometry.aperture_start = 0;
        domain->domain.geometry.aperture_end   = ~0UL;
        domain->domain.geometry.force_aperture = true;

        return &domain->domain;

err_counter:
        free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
        kfree(domain);
        return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        struct sysmmu_drvdata *data, *next;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&domain->clients));

        spin_lock_irqsave(&domain->lock, flags);

        list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
                if (__sysmmu_disable(data))
                        data->master = NULL;
                list_del_init(&data->domain_node);
        }

        spin_unlock_irqrestore(&domain->lock, flags);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(domain->pgtable + i))
                        kmem_cache_free(lv2table_kmem_cache,
                                phys_to_virt(lv2table_base(domain->pgtable + i)));

        free_pages((unsigned long)domain->pgtable, 2);
        free_pages((unsigned long)domain->lv2entcnt, 1);
        kfree(domain);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
                                      struct device *dev)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        struct sysmmu_drvdata *data;
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        unsigned long flags;
        int ret = -ENODEV;

        if (!has_sysmmu(dev))
                return -ENODEV;

        list_for_each_entry(data, &owner->controllers, owner_node) {
                pm_runtime_get_sync(data->sysmmu);
                ret = __sysmmu_enable(data, pagetable, domain);
                if (ret >= 0) {
                        data->master = dev;

                        spin_lock_irqsave(&domain->lock, flags);
                        list_add_tail(&data->domain_node, &domain->clients);
                        spin_unlock_irqrestore(&domain->lock, flags);
                }
        }

        if (ret < 0) {
                dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
                        __func__, &pagetable);
                return ret;
        }

        dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
                __func__, &pagetable, (ret == 0) ? "" : ", again");

        return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
                                       struct device *dev)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        struct sysmmu_drvdata *data, *next;
        unsigned long flags;
        bool found = false;

        if (!has_sysmmu(dev))
                return;

        spin_lock_irqsave(&domain->lock, flags);
        list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
                if (data->master == dev) {
                        if (__sysmmu_disable(data)) {
                                data->master = NULL;
                                list_del_init(&data->domain_node);
                        }
                        pm_runtime_put(data->sysmmu);
                        found = true;
                }
        }
        spin_unlock_irqrestore(&domain->lock, flags);

        if (found)
                dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
                        __func__, &pagetable);
        else
                dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
                sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
                return ERR_PTR(-EADDRINUSE);
        }

        if (lv1ent_fault(sent)) {
                sysmmu_pte_t *pent;
                bool need_flush_flpd_cache = lv1ent_zero(sent);

                pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
                BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
                if (!pent)
                        return ERR_PTR(-ENOMEM);

                *sent = mk_lv1ent_page(virt_to_phys(pent));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
                pgtable_flush(pent, pent + NUM_LV2ENTRIES);
                pgtable_flush(sent, sent + 1);

                /*
                 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
                 * FLPD cache may cache the address of zero_l2_table. This
                 * function replaces the zero_l2_table with new L2 page table
                 * to write valid mappings.
                 * Accessing the valid area may cause page fault since FLPD
                 * cache may still cache zero_l2_table for the valid area
                 * instead of new L2 page table that has the mapping
                 * information of the valid area.
                 * Thus any replacement of zero_l2_table with other valid L2
                 * page table must involve FLPD cache invalidation for System
                 * MMU v3.3.
                 * FLPD cache invalidation is performed with TLB invalidation
                 * by VPN without blocking. It is safe to invalidate TLB without
                 * blocking because the target address of TLB invalidation is
                 * not currently mapped.
                 */
                if (need_flush_flpd_cache) {
                        struct sysmmu_drvdata *data;

                        spin_lock(&domain->lock);
                        list_for_each_entry(data, &domain->clients, domain_node)
                                sysmmu_tlb_invalidate_flpdcache(data, iova);
                        spin_unlock(&domain->lock);
                }
        }

        return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
                          sysmmu_pte_t *sent, sysmmu_iova_t iova,
                          phys_addr_t paddr, short *pgcnt)
{
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                        iova);
                return -EADDRINUSE;
        }

        if (lv1ent_page(sent)) {
                if (*pgcnt != NUM_LV2ENTRIES) {
                        WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                                iova);
                        return -EADDRINUSE;
                }

                kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
                *pgcnt = 0;
        }

        *sent = mk_lv1ent_sect(paddr);

        pgtable_flush(sent, sent + 1);

        spin_lock(&domain->lock);
        if (lv1ent_page_zero(sent)) {
                struct sysmmu_drvdata *data;
                /*
                 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
                 * entry by speculative prefetch of SLPD which has no mapping.
                 */
                list_for_each_entry(data, &domain->clients, domain_node)
                        sysmmu_tlb_invalidate_flpdcache(data, iova);
        }
        spin_unlock(&domain->lock);

        return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
                       short *pgcnt)
{
        if (size == SPAGE_SIZE) {
                if (WARN_ON(!lv2ent_fault(pent)))
                        return -EADDRINUSE;

                *pent = mk_lv2ent_spage(paddr);
                pgtable_flush(pent, pent + 1);
                *pgcnt -= 1;
        } else { /* size == LPAGE_SIZE */
                int i;

                for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
                        if (WARN_ON(!lv2ent_fault(pent))) {
                                memset(pent - i, 0, sizeof(*pent) * i);
                                return -EADDRINUSE;
                        }

                        *pent = mk_lv2ent_lpage(paddr);
                }
                pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
                *pgcnt -= SPAGES_PER_LPAGE;
        }

        return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries during a page table walk.
 * However, that logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if the cached fault entry is hit, even
 * though the fault entry has since been updated to a valid entry.
 * To prevent caching faulty page table entries which may later be updated to
 * valid entries, the virtual memory manager must apply the following
 * workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of an I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs, in addition:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 * (An illustrative alignment helper follows below.)
 */
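
/*
 * Minimal sketch, not part of the driver proper: one way a caller's I/O
 * virtual memory manager could satisfy the 128KiB start-alignment rule
 * described above. The helper name is hypothetical and only illustrates
 * the arithmetic.
 */
static inline sysmmu_iova_t exynos_iovm_align_start(sysmmu_iova_t iova)
{
        /* round a region start up to the next 128KiB (0x20000) boundary */
        return (iova + 0x1ffff) & ~(sysmmu_iova_t)0x1ffff;
}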

static int exynos_iommu_map(struct iommu_domain *iommu_domain,
                            unsigned long l_iova, phys_addr_t paddr, size_t size,
                            int prot)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_pte_t *entry;
        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
        unsigned long flags;
        int ret = -ENOMEM;

        BUG_ON(domain->pgtable == NULL);

        spin_lock_irqsave(&domain->pgtablelock, flags);

        entry = section_entry(domain->pgtable, iova);

        if (size == SECT_SIZE) {
                ret = lv1set_section(domain, entry, iova, paddr,
                                     &domain->lv2entcnt[lv1ent_offset(iova)]);
        } else {
                sysmmu_pte_t *pent;

                pent = alloc_lv2entry(domain, entry, iova,
                                      &domain->lv2entcnt[lv1ent_offset(iova)]);

                if (IS_ERR(pent))
                        ret = PTR_ERR(pent);
                else
                        ret = lv2set_page(pent, paddr, size,
                                          &domain->lv2entcnt[lv1ent_offset(iova)]);
        }

        if (ret)
                pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
                        __func__, ret, size, iova);

        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
                                              sysmmu_iova_t iova, size_t size)
{
        struct sysmmu_drvdata *data;
        unsigned long flags;

        spin_lock_irqsave(&domain->lock, flags);

        list_for_each_entry(data, &domain->clients, domain_node)
                sysmmu_tlb_invalidate_entry(data, iova, size);

        spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
                                 unsigned long l_iova, size_t size)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
        sysmmu_pte_t *ent;
        size_t err_pgsize;
        unsigned long flags;

        BUG_ON(domain->pgtable == NULL);

        spin_lock_irqsave(&domain->pgtablelock, flags);

        ent = section_entry(domain->pgtable, iova);

        if (lv1ent_section(ent)) {
                if (WARN_ON(size < SECT_SIZE)) {
                        err_pgsize = SECT_SIZE;
                        goto err;
                }

                /* workaround for h/w bug in System MMU v3.3 */
                *ent = ZERO_LV2LINK;
                pgtable_flush(ent, ent + 1);
                size = SECT_SIZE;
                goto done;
        }

        if (unlikely(lv1ent_fault(ent))) {
                if (size > SECT_SIZE)
                        size = SECT_SIZE;
                goto done;
        }

        /* lv1ent_page(ent) == true here */

        ent = page_entry(ent, iova);

        if (unlikely(lv2ent_fault(ent))) {
                size = SPAGE_SIZE;
                goto done;
        }

        if (lv2ent_small(ent)) {
                *ent = 0;
                size = SPAGE_SIZE;
                pgtable_flush(ent, ent + 1);
                domain->lv2entcnt[lv1ent_offset(iova)] += 1;
                goto done;
        }

        /* lv2ent_large(ent) == true here */
        if (WARN_ON(size < LPAGE_SIZE)) {
                err_pgsize = LPAGE_SIZE;
                goto err;
        }

        memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
        pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

        size = LPAGE_SIZE;
        domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;

done:
        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        exynos_iommu_tlb_invalidate_entry(domain, iova, size);

        return size;
err:
        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
                __func__, size, iova, err_pgsize);

        return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
                                             dma_addr_t iova)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_pte_t *entry;
        unsigned long flags;
        phys_addr_t phys = 0;

        spin_lock_irqsave(&domain->pgtablelock, flags);

        entry = section_entry(domain->pgtable, iova);

        if (lv1ent_section(entry)) {
                phys = section_phys(entry) + section_offs(iova);
        } else if (lv1ent_page(entry)) {
                entry = page_entry(entry, iova);

                if (lv2ent_large(entry))
                        phys = lpage_phys(entry) + lpage_offs(iova);
                else if (lv2ent_small(entry))
                        phys = spage_phys(entry) + spage_offs(iova);
        }

        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        return phys;
}

static int exynos_iommu_add_device(struct device *dev)
{
        struct iommu_group *group;
        int ret;

        if (!has_sysmmu(dev))
                return -ENODEV;

        group = iommu_group_get(dev);

        if (!group) {
                group = iommu_group_alloc();
                if (IS_ERR(group)) {
                        dev_err(dev, "Failed to allocate IOMMU group\n");
                        return PTR_ERR(group);
                }
        }

        ret = iommu_group_add_device(group, dev);
        iommu_group_put(group);

        return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
        if (!has_sysmmu(dev))
                return;

        iommu_group_remove_device(dev);
}

static const struct iommu_ops exynos_iommu_ops = {
        .domain_alloc = exynos_iommu_domain_alloc,
        .domain_free = exynos_iommu_domain_free,
        .attach_dev = exynos_iommu_attach_device,
        .detach_dev = exynos_iommu_detach_device,
        .map = exynos_iommu_map,
        .unmap = exynos_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = exynos_iommu_iova_to_phys,
        .add_device = exynos_iommu_add_device,
        .remove_device = exynos_iommu_remove_device,
        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

static int __init exynos_iommu_init(void)
{
        struct device_node *np;
        int ret;

        np = of_find_matching_node(NULL, sysmmu_of_match);
        if (!np)
                return 0;

        of_node_put(np);

        lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
                                LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
        if (!lv2table_kmem_cache) {
                pr_err("%s: Failed to create kmem cache\n", __func__);
                return -ENOMEM;
        }

        ret = platform_driver_register(&exynos_sysmmu_driver);
        if (ret) {
                pr_err("%s: Failed to register driver\n", __func__);
                goto err_reg_driver;
        }

        zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
        if (zero_lv2_table == NULL) {
                pr_err("%s: Failed to allocate zero level2 page table\n",
                        __func__);
                ret = -ENOMEM;
                goto err_zero_lv2;
        }

        ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
        if (ret) {
                pr_err("%s: Failed to register exynos-iommu driver.\n",
                        __func__);
                goto err_set_iommu;
        }

        return 0;
err_set_iommu:
        kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
        platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
        kmem_cache_destroy(lv2table_kmem_cache);
        return ret;
}
subsys_initcall(exynos_iommu_init);