iommu/exynos: Refactor function parameters to simplify code
drivers/iommu/exynos-iommu.c

/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>	/* for kmemleak_ignore(); editor-added, assuming it is not already pulled in transitively */
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
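
/*
 * Editor's note (derived from the predicates above): the low two bits of an
 * entry encode its type. An lv1 entry with 0b10 is a 1MiB section, 0b01 a
 * link to an lv2 table, and 0b00/0b11 a fault; an lv1 entry equal to
 * ZERO_LV2LINK is also treated as a fault. An lv2 entry with 0b01 is a
 * 64KiB large page, one with bit 1 set is a 4KiB small page, and 0b00 is
 * a fault.
 */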

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
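
/*
 * Worked example (editor's illustration): an iova of 0x12345678 splits into
 * lv1ent_offset() == 0x123 (bits [31:20], one of NUM_LV1ENTRIES == 4096),
 * lv2ent_offset() == 0x45 (bits [19:12], one of NUM_LV2ENTRIES == 256) and
 * a 4KiB page offset of 0x678. Each lv2 table is therefore
 * LV2TABLE_SIZE == 1KiB, which matches the 0xFFFFFC00 mask used by
 * lv2table_base() below.
 */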

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_QOS(n) (((n) & 0xF) << 7)
#define CFG_MASK 0x0150FFFF /* Selecting bits 0-15, 20, 22 and 24 */
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* upper 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
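
/*
 * Example (editor's illustration): MAKE_MMU_VER(3, 3) == 0x183, so a
 * REG_MMU_VERSION readout whose top 11 bits (see MMU_RAW_VER) are 0x183
 * decodes as MMU_MAJ_VER() == 3 and MMU_MIN_VER() == 3, i.e. System MMU
 * v3.3.
 */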

#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

/* attached to dev.archdata.iommu of the master device */
struct exynos_iommu_owner {
	struct device *sysmmu;
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

struct sysmmu_drvdata {
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *master;	/* Owner of system MMU */
	void __iomem *sfrbase;
	struct clk *clk;
	struct clk *clk_master;
	int activations;
	spinlock_t lock;
	struct iommu_domain *domain;
	struct list_head domain_node;
	phys_addr_t pgtable;
	unsigned int version;
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not previously active
	   and needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU now needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase, phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s (page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* The SYSMMU is in a blocked state when the interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault was not raised by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if the IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->master, addr, itype);
	}

	/* the fault was not recovered by the fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}
static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	data->version = ver;
}
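
/*
 * Worked example (editor's note): for a v3.3 System MMU the function above
 * ends up writing cfg == CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN (with
 * CFG_LRU cleared again), i.e. 0x1100780, to REG_MMU_CFG.
 */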

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
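
/*
 * Editor's note (derived from the logic below): __sysmmu_enable() returns 0
 * when the SysMMU is newly enabled, 1 when it was already enabled with the
 * same page table, and -EBUSY when it is already busy with a different
 * page table.
 */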
static int __sysmmu_enable(struct sysmmu_drvdata *data,
			phys_addr_t pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (data->version == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidations required:
		 *   4KB page: 1 invalidation
		 *  64KB page: 16 invalidations
		 *   1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any of the 64 sets, while a
		 * 64KB page can only be in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(data->master,
			"disabled. Skipping TLB invalidation @ %#x\n", iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler for irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
	}
};
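
/*
 * Illustrative devicetree node (editor's sketch, not from this file): the
 * probe path above expects one MMIO region, one interrupt, and clocks named
 * "sysmmu" and, optionally, "master". The node name, addresses and clock
 * indices below are hypothetical.
 *
 *	sysmmu@13e80000 {
 *		compatible = "samsung,exynos-sysmmu";
 *		reg = <0x13e80000 0x1000>;
 *		interrupts = <0 97 0>;
 *		clock-names = "sysmmu", "master";
 *		clocks = <&clock 353>, <&clock 352>;
 *	};
 */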

static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *exynos_domain;
	int i;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
	if (!exynos_domain)
		return NULL;

	exynos_domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!exynos_domain->pgtable)
		goto err_pgtable;

	exynos_domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!exynos_domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(exynos_domain->pgtable, exynos_domain->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&exynos_domain->lock);
	spin_lock_init(&exynos_domain->pgtablelock);
	INIT_LIST_HEAD(&exynos_domain->clients);

	exynos_domain->domain.geometry.aperture_start = 0;
	exynos_domain->domain.geometry.aperture_end   = ~0UL;
	exynos_domain->domain.geometry.force_aperture = true;

	return &exynos_domain->domain;

err_counter:
	free_pages((unsigned long)exynos_domain->pgtable, 2);
err_pgtable:
	kfree(exynos_domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(data, next, &priv->clients, domain_node) {
		if (__sysmmu_disable(data))
			data->master = NULL;
		list_del_init(&data->domain_node);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(priv);
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret = -ENODEV;

	if (!has_sysmmu(dev))
		return -ENODEV;

	data = dev_get_drvdata(owner->sysmmu);
	if (data) {
		ret = __sysmmu_enable(data, pagetable, domain);
		if (ret >= 0) {
			data->master = dev;

			spin_lock_irqsave(&priv->lock, flags);
			list_add_tail(&data->domain_node, &priv->clients);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
					__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}
static void exynos_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	struct sysmmu_drvdata *data;
	unsigned long flags;
	bool found = false;

	if (!has_sysmmu(dev))
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(data, &priv->clients, domain_node) {
		if (data->master == dev) {
			if (__sysmmu_disable(data)) {
				data->master = NULL;
				list_del_init(&data->domain_node);
			}
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (found)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying to map on %#08x, which is mapped with a 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

		/*
		 * If a pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table.
		 * This function replaces the zero_l2_table with a new L2
		 * page table to write valid mappings.
		 * Accessing the valid area may cause a page fault, since
		 * the FLPD cache may still cache zero_l2_table for the
		 * valid area instead of the new L2 page table that holds
		 * the mapping information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid
		 * L2 page table must involve an FLPD cache invalidation on
		 * System MMU v3.3.
		 * The FLPD cache invalidation is performed as a TLB
		 * invalidation by VPN without blocking. It is safe to
		 * invalidate the TLB without blocking because the target
		 * address of the TLB invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&priv->lock);
			list_for_each_entry(data, &priv->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&priv->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *priv,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying to map 1MiB@%#08x, which is already mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying to map 1MiB@%#08x, which is already mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&priv->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flush the FLPD cache of System MMU v3.3, which may have
		 * cached an FLPD entry by speculative prefetch of an SLPD
		 * that has no mapping.
		 */
		list_for_each_entry(data, &priv->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&priv->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic that improves address translation
 * performance by caching more page table entries during a page table walk.
 * However, that logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault when the cached fault entry is hit, even
 * if the entry has since been updated to a valid one. To prevent faulty
 * entries that may later become valid from being cached, the virtual
 * memory manager must apply the following workarounds.
 *
 * Any two consecutive I/O virtual address regions must have a hole of at
 * least 128KiB to prevent misbehavior of System MMU 3.x (workaround for
 * the h/w bug).
 *
 * More precisely, the start address of any I/O virtual region must be
 * aligned to the following sizes for System MMU v3.1 and v3.2:
 *	System MMU v3.1: 128KiB
 *	System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs stricter workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
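
/*
 * Editor's sketch of the workaround above: a simple-minded IOVA region
 * allocator that keeps every region start 128KiB-aligned and leaves at
 * least a 128KiB hole between consecutive regions. Purely illustrative:
 * the helper name, the constant name and the #if 0 guard are not part of
 * this driver.
 */
#if 0
#define EXYNOS_IOVA_REGION_ALIGN	SZ_128K

static sysmmu_iova_t example_next_region_start(sysmmu_iova_t prev_region_end)
{
	/* hole of at least 128KiB, start aligned to 128KiB */
	return ALIGN(prev_region_end + EXYNOS_IOVA_REGION_ALIGN,
		     EXYNOS_IOVA_REGION_ALIGN);
}
#endif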

static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(priv, entry, iova, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(priv, entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(data, &priv->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(priv, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
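
/*
 * Editor's sketch: how a master-device driver would consume these ops via
 * the generic IOMMU API (illustrative only; error handling elided and the
 * addresses are hypothetical, chosen 1MiB-aligned to satisfy the v3.3
 * alignment workaround documented above):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(dom, dev);
 *	iommu_map(dom, 0x20000000, phys, SZ_1M, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(dom, 0x20000000, SZ_1M);
 *	iommu_detach_device(dom, dev);
 *	iommu_domain_free(dom);
 */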

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);