/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

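/*
 * Worked example (illustrative, not part of the original source): with the
 * layout above, the 32-bit IOVA 0x12345678 decomposes as
 *	lv1ent_offset(0x12345678) = 0x12345678 >> 20          = 0x123
 *	lv2ent_offset(0x12345678) = (0x12345678 >> 12) & 0xFF = 0x45
 *	spage_offs(0x12345678)    = 0x12345678 & 0xFFF        = 0x678
 * i.e. first-level entry 0x123 links to a 256-entry second-level table
 * whose entry 0x45 maps the 4KiB page containing offset 0x678.
 */
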
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_MASK 0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))

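/*
 * Illustrative example (not in the original source): the version register
 * packs "major.minor" into 11 bits at bit 21, so a raw value read from
 * REG_MMU_VERSION on a v3.3 System MMU satisfies
 *	MMU_RAW_VER(reg) == MAKE_MMU_VER(3, 3) == (3 << 7) | 3 == 0x183,
 * with MMU_MAJ_VER(0x183) == 3 and MMU_MIN_VER(0x183) == 3.
 */
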
#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

/* attached to dev.archdata.iommu of the master device */
struct exynos_iommu_owner {
	struct list_head client; /* entry of exynos_iommu_domain.clients */
	struct device *dev;
	struct device *sysmmu;
	struct iommu_domain *domain;
	void *vmm_data;		/* IO virtual memory manager's data */
	spinlock_t lock;	/* Lock to preserve consistency of System MMU */
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for this structure */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
};

struct sysmmu_drvdata {
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *master;	/* Owner of system MMU */
	void __iomem *sfrbase;
	struct clk *clk;
	struct clk *clk_master;
	int activations;
	spinlock_t lock;
	struct iommu_domain *domain;
	phys_addr_t pgtable;
};

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static unsigned int __raw_sysmmu_version(struct sysmmu_drvdata *data)
{
	return MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s (Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when the interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault was not caused by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if the IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = __raw_sysmmu_version(data);
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data,
			phys_addr_t pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * Returns -error if an error occurred and the System MMU is not enabled,
 * 0 if the System MMU has just been enabled, and 1 if the System MMU was
 * already enabled before.
 */
static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
				  struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	ret = __sysmmu_enable(data, pgtable, domain);
	if (ret >= 0)
		data->master = dev;

	spin_unlock_irqrestore(&owner->lock, flags);

	return ret;
}

int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
{
	BUG_ON(!memblock_is_memory(pgtable));

	return __exynos_sysmmu_enable(dev, pgtable, NULL);
}

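/*
 * Hypothetical usage sketch (not part of the original source): a caller
 * owning a page table at physical address "pgtable_pa" could do
 *
 *	ret = exynos_sysmmu_enable(dev, pgtable_pa);
 *	if (ret < 0)
 *		return ret;	// e.g. -EBUSY: enabled with another pgtable
 *	// ret == 0: System MMU was just enabled; ret == 1: already enabled
 *
 * per the return-value contract documented above.
 */
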
static bool exynos_sysmmu_disable(struct device *dev)
{
	unsigned long flags;
	bool disabled = true;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	disabled = __sysmmu_disable(data);
	if (disabled)
		data->master = NULL;

	spin_unlock_irqrestore(&owner->lock, flags);

	return disabled;
}

static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (__raw_sysmmu_version(data) == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
					    sysmmu_iova_t iova)
{
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
					size_t size)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
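		/*
		 * Illustrative numbers (not from the original source): on a
		 * v2 System MMU, unmapping a 64KiB large page thus issues
		 * min(64KiB / 4KiB, 64) = 16 writes to REG_MMU_FLUSH_ENTRY,
		 * one per 4KiB step, while a 1MiB section is capped at 64,
		 * the number of TLB sets.
		 */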
		if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
			iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.owner		= THIS_MODULE,
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
	}
};

static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;
	int i;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!priv->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		priv->pgtable[i + 0] = ZERO_LV2LINK;
		priv->pgtable[i + 1] = ZERO_LV2LINK;
		priv->pgtable[i + 2] = ZERO_LV2LINK;
		priv->pgtable[i + 3] = ZERO_LV2LINK;
		priv->pgtable[i + 4] = ZERO_LV2LINK;
		priv->pgtable[i + 5] = ZERO_LV2LINK;
		priv->pgtable[i + 6] = ZERO_LV2LINK;
		priv->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = ~0UL;
	domain->geometry.force_aperture = true;

	domain->priv = priv;
	return 0;

err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}

static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct exynos_iommu_owner *owner;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		while (!exynos_sysmmu_disable(owner->dev))
			; /* until System MMU is actually disabled */
	}

	while (!list_empty(&priv->clients))
		list_del_init(priv->clients.next);

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(domain->priv);
	domain->priv = NULL;
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(dev, pagetable, domain);
	if (ret == 0) {
		list_add_tail(&owner->client, &priv->clients);
		owner->domain = domain;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
					__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct exynos_iommu_owner *owner;
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		if (owner == dev->archdata.iommu) {
			if (exynos_sysmmu_disable(dev)) {
				list_del_init(&owner->client);
				owner->domain = NULL;
			}
			break;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (owner == dev->archdata.iommu)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct exynos_iommu_owner *owner;

			spin_lock(&priv->lock);
			list_for_each_entry(owner, &priv->clients, client)
				sysmmu_tlb_invalidate_flpdcache(
							owner->dev, iova);
			spin_unlock(&priv->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *priv,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&priv->lock);
	if (lv1ent_page_zero(sent)) {
		struct exynos_iommu_owner *owner;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(owner, &priv->clients, client)
			sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
	}
	spin_unlock(&priv->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault when the cached fault entry is hit, even
 * though the entry has been updated to a valid one after it was cached.
 * To prevent caching of faulty page table entries which may be updated to
 * valid entries later, the virtual memory manager must apply the following
 * workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of at
 * least 128KiB to prevent misbehavior of System MMU 3.x (workaround for
 * h/w bug).
 *
 * Precisely, the start address of any I/O virtual region must be aligned
 * to the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
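
/*
 * Illustrative sketch (not part of the original source): an IOVA allocator
 * honouring the v3.3 workaround above could place each new region at
 *
 *	start = ALIGN(prev_end, SZ_128K) + SZ_128K;
 *
 * where ALIGN() and SZ_128K are the kernel's own helpers and "prev_end" is
 * the hypothetical end of the previously allocated region. This both aligns
 * the start to 128KiB and leaves a hole of at least 128KiB between any two
 * consecutive regions.
 */
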
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = domain->priv;
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(priv, entry, iova, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(priv, entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
						sysmmu_iova_t iova, size_t size)
{
	struct exynos_iommu_owner *owner;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client)
		sysmmu_tlb_invalidate_entry(owner->dev, iova, size);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(priv, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = domain->priv;
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static const struct iommu_ops exynos_iommu_ops = {
	.domain_init = exynos_iommu_domain_init,
	.domain_destroy = exynos_iommu_domain_destroy,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
		LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);