/* linux/drivers/iommu/exynos-iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
                           ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
                          ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
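
/*
 * The low two bits of each entry encode its type, mirroring the ARM
 * short-descriptor format: at the first level, 0 is a fault, 1 links to a
 * second-level table and 2 is a 1MiB section; at the second level, 0 is a
 * fault, 1 is a 64KiB large page and bit 1 set marks a 4KiB small page.
 * ZERO_LV2LINK additionally distinguishes the shared all-fault lv2 table
 * used by the v3.3 FLPD-cache workaround from a link to a real lv2 table.
 */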

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
        return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
        return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
        return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
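
/*
 * Worked example of the two-level lookup: for iova 0x12345678,
 * lv1ent_offset() = 0x12345678 >> 20 = 0x123 (index into the 4096-entry
 * first-level table), lv2ent_offset() = (0x12345678 >> 12) & 0xFF = 0x45
 * (index into the 256-entry second-level table), and the in-page offsets
 * are section_offs() = 0x45678, lpage_offs() = 0x5678, spage_offs() = 0x678.
 */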

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_MASK 0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
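
/*
 * Example: REG_MMU_VERSION keeps the version in bits 31:21, so for a
 * System MMU v3.3 MMU_RAW_VER() yields MAKE_MMU_VER(3, 3) = (3 << 7) | 3
 * = 0x183, from which MMU_MAJ_VER() recovers 3 and MMU_MIN_VER() recovers 3.
 */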

#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
        return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
        return (sysmmu_pte_t *)phys_to_virt(
                        lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
        SYSMMU_PAGEFAULT,
        SYSMMU_AR_MULTIHIT,
        SYSMMU_AW_MULTIHIT,
        SYSMMU_BUSERROR,
        SYSMMU_AR_SECURITY,
        SYSMMU_AR_ACCESS,
        SYSMMU_AW_SECURITY,
        SYSMMU_AW_PROTECTION, /* 7 */
        SYSMMU_FAULT_UNKNOWN,
        SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
        REG_PAGE_FAULT_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AW_FAULT_ADDR,
        REG_DEFAULT_SLAVE_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AW_FAULT_ADDR,
        REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        "PAGE FAULT",
        "AR MULTI-HIT FAULT",
        "AW MULTI-HIT FAULT",
        "BUS ERROR",
        "AR SECURITY PROTECTION FAULT",
        "AR ACCESS PROTECTION FAULT",
        "AW SECURITY PROTECTION FAULT",
        "AW ACCESS PROTECTION FAULT",
        "UNKNOWN FAULT"
};
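
/*
 * Both tables are indexed by enum exynos_sysmmu_inttype; the interrupt
 * handler derives the index from the lowest bit set in REG_INT_STATUS and
 * reads the faulting address from the per-fault register listed above.
 */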

/* attached to dev.archdata.iommu of the master device */
struct exynos_iommu_owner {
        struct list_head client; /* entry of exynos_iommu_domain.clients */
        struct device *dev;
        struct device *sysmmu;
        struct iommu_domain *domain;
        void *vmm_data;         /* IO virtual memory manager's data */
        spinlock_t lock;        /* Lock to preserve consistency of System MMU */
};

struct exynos_iommu_domain {
        struct list_head clients; /* list of sysmmu_drvdata.node */
        sysmmu_pte_t *pgtable;  /* lv1 page table, 16KB */
        short *lv2entcnt;       /* free lv2 entry counter for each section */
        spinlock_t lock;        /* lock for this structure */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
        struct iommu_domain domain; /* generic domain data structure */
};

struct sysmmu_drvdata {
        struct device *sysmmu;  /* System MMU's device descriptor */
        struct device *master;  /* Owner of system MMU */
        void __iomem *sfrbase;
        struct clk *clk;
        struct clk *clk_master;
        int activations;
        spinlock_t lock;
        struct iommu_domain *domain;
        phys_addr_t pgtable;
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU was not active previously
           and it needs to be initialized */
        return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU needs to be disabled */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
        return data->activations > 0;
}
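
/*
 * activations is a plain enable reference count: the hardware is only
 * touched on the 0 -> 1 and 1 -> 0 transitions, and every caller
 * serializes these helpers under data->lock.
 */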

static void sysmmu_unblock(void __iomem *sfrbase)
{
        __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static unsigned int __raw_sysmmu_version(struct sysmmu_drvdata *data)
{
        return MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
}

static bool sysmmu_block(void __iomem *sfrbase)
{
        int i = 120;

        __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
        while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
                --i;

        if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
                sysmmu_unblock(sfrbase);
                return false;
        }

        return true;
}
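
/*
 * Blocking stalls address translation while software updates the TLB or
 * the page-table base. sysmmu_block() busy-polls bit 0 of REG_MMU_STATUS
 * for up to 120 reads to confirm the MMU actually entered the blocked
 * state, and re-enables it on timeout so a failed block never leaves the
 * MMU stalled.
 */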

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
        __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
                                sysmmu_iova_t iova, unsigned int num_inv)
{
        unsigned int i;

        for (i = 0; i < num_inv; i++) {
                __raw_writel((iova & SPAGE_MASK) | 1,
                                sfrbase + REG_MMU_FLUSH_ENTRY);
                iova += SPAGE_SIZE;
        }
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
                                phys_addr_t pgd)
{
        __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

        __sysmmu_tlb_invalidate(sfrbase);
}
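
/*
 * Programming a new page-table base is always followed by a full TLB
 * flush: any entry cached under the previous table would otherwise keep
 * translating against stale mappings.
 */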

static void show_fault_information(const char *name,
                enum exynos_sysmmu_inttype itype,
                phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
        sysmmu_pte_t *ent;

        if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
                itype = SYSMMU_FAULT_UNKNOWN;

        pr_err("%s occurred at %#x by %s (Page table base: %pa)\n",
                sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

        ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
        pr_err("\tLv1 entry: %#x\n", *ent);

        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
                pr_err("\t Lv2 entry: %#x\n", *ent);
        }
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
        /* The SYSMMU is in blocked state when an interrupt has occurred. */
        struct sysmmu_drvdata *data = dev_id;
        enum exynos_sysmmu_inttype itype;
        sysmmu_iova_t addr = -1;
        int ret = -ENOSYS;

        WARN_ON(!is_sysmmu_active(data));

        spin_lock(&data->lock);

        if (!IS_ERR(data->clk_master))
                clk_enable(data->clk_master);

        itype = (enum exynos_sysmmu_inttype)
                __ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
        if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
                itype = SYSMMU_FAULT_UNKNOWN;
        else
                addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

        if (itype == SYSMMU_FAULT_UNKNOWN) {
                pr_err("%s: Fault was not caused by System MMU '%s'!\n",
                        __func__, dev_name(data->sysmmu));
                pr_err("%s: Please check if IRQ is correctly configured.\n",
                        __func__);
                BUG();
        } else {
                unsigned int base =
                        __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
                show_fault_information(dev_name(data->sysmmu),
                                        itype, base, addr);
                if (data->domain)
                        ret = report_iommu_fault(data->domain,
                                        data->master, addr, itype);
        }

        /* fault is not recovered by fault handler */
        BUG_ON(ret != 0);

        __raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

        sysmmu_unblock(data->sfrbase);

        if (!IS_ERR(data->clk_master))
                clk_disable(data->clk_master);

        spin_unlock(&data->lock);

        return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
        if (!IS_ERR(data->clk_master))
                clk_enable(data->clk_master);

        __raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
        __raw_writel(0, data->sfrbase + REG_MMU_CFG);

        clk_disable(data->clk);
        if (!IS_ERR(data->clk_master))
                clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
        bool disabled;
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);

        disabled = set_sysmmu_inactive(data);

        if (disabled) {
                data->pgtable = 0;
                data->domain = NULL;

                __sysmmu_disable_nocount(data);

                dev_dbg(data->sysmmu, "Disabled\n");
        } else {
                dev_dbg(data->sysmmu, "%d times left to disable\n",
                                        data->activations);
        }

        spin_unlock_irqrestore(&data->lock, flags);

        return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
        unsigned int cfg = CFG_LRU | CFG_QOS(15);
        unsigned int ver;

        ver = __raw_sysmmu_version(data);
        if (MMU_MAJ_VER(ver) == 3) {
                if (MMU_MIN_VER(ver) >= 2) {
                        cfg |= CFG_FLPDCACHE;
                        if (MMU_MIN_VER(ver) == 3) {
                                cfg |= CFG_ACGEN;
                                cfg &= ~CFG_LRU;
                        } else {
                                cfg |= CFG_SYSSEL;
                        }
                }
        }

        __raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
}
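
/*
 * The configuration is version-dependent: v3.2 and later gain the FLPD
 * cache (CFG_FLPDCACHE); v3.2 additionally sets CFG_SYSSEL, while v3.3
 * enables automatic clock gating (CFG_ACGEN) and drops the LRU replacement
 * bit that older revisions use.
 */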

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
        if (!IS_ERR(data->clk_master))
                clk_enable(data->clk_master);
        clk_enable(data->clk);

        __raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

        __sysmmu_init_config(data);

        __sysmmu_set_ptbase(data->sfrbase, data->pgtable);

        __raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

        if (!IS_ERR(data->clk_master))
                clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data,
                        phys_addr_t pgtable, struct iommu_domain *domain)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        if (set_sysmmu_active(data)) {
                data->pgtable = pgtable;
                data->domain = domain;

                __sysmmu_enable_nocount(data);

                dev_dbg(data->sysmmu, "Enabled\n");
        } else {
                ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

                dev_dbg(data->sysmmu, "already enabled\n");
        }

        if (WARN_ON(ret < 0))
                set_sysmmu_inactive(data); /* decrement count */

        spin_unlock_irqrestore(&data->lock, flags);

        return ret;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * returns -error if an error occurred and System MMU is not enabled,
 * 0 if the System MMU has been just enabled and 1 if System MMU was already
 * enabled before.
 */
static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
                                  struct iommu_domain *domain)
{
        int ret = 0;
        unsigned long flags;
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct sysmmu_drvdata *data;

        BUG_ON(!has_sysmmu(dev));

        spin_lock_irqsave(&owner->lock, flags);

        data = dev_get_drvdata(owner->sysmmu);

        ret = __sysmmu_enable(data, pgtable, domain);
        if (ret >= 0)
                data->master = dev;

        spin_unlock_irqrestore(&owner->lock, flags);

        return ret;
}

int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
{
        BUG_ON(!memblock_is_memory(pgtable));

        return __exynos_sysmmu_enable(dev, pgtable, NULL);
}
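
/*
 * A minimal usage sketch (hypothetical caller; assumes lv1_table is the
 * kernel virtual address of a 16KiB first-level page table):
 *
 *      int ret = exynos_sysmmu_enable(dev, virt_to_phys(lv1_table));
 *      if (ret < 0)
 *              return ret;      0 means newly enabled, 1 already enabled.
 */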

static bool exynos_sysmmu_disable(struct device *dev)
{
        unsigned long flags;
        bool disabled = true;
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct sysmmu_drvdata *data;

        BUG_ON(!has_sysmmu(dev));

        spin_lock_irqsave(&owner->lock, flags);

        data = dev_get_drvdata(owner->sysmmu);

        disabled = __sysmmu_disable(data);
        if (disabled)
                data->master = NULL;

        spin_unlock_irqrestore(&owner->lock, flags);

        return disabled;
}

static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
                                              sysmmu_iova_t iova)
{
        if (__raw_sysmmu_version(data) == MAKE_MMU_VER(3, 3))
                __raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
                                            sysmmu_iova_t iova)
{
        unsigned long flags;
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);

        if (!IS_ERR(data->clk_master))
                clk_enable(data->clk_master);

        spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data))
                __sysmmu_tlb_invalidate_flpdcache(data, iova);
        spin_unlock_irqrestore(&data->lock, flags);

        if (!IS_ERR(data->clk_master))
                clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
                                        size_t size)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        unsigned long flags;
        struct sysmmu_drvdata *data;

        data = dev_get_drvdata(owner->sysmmu);

        spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data)) {
                unsigned int num_inv = 1;

                if (!IS_ERR(data->clk_master))
                        clk_enable(data->clk_master);

                /*
                 * L2TLB invalidation required
                 * 4KB page: 1 invalidation
                 * 64KB page: 16 invalidations
                 * 1MB page: 64 invalidations
                 * because it is a set-associative TLB
                 * with 8 ways and 64 sets.
                 * A 1MB page can be cached in any of the sets.
                 * A 64KB page can be cached in one of 16 consecutive sets.
                 */
                if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2)
                        num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

                if (sysmmu_block(data->sfrbase)) {
                        __sysmmu_tlb_invalidate_entry(
                                data->sfrbase, iova, num_inv);
                        sysmmu_unblock(data->sfrbase);
                }
                if (!IS_ERR(data->clk_master))
                        clk_disable(data->clk_master);
        } else {
                dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
                        iova);
        }
        spin_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        unsigned long flags;
        struct sysmmu_drvdata *data;

        data = dev_get_drvdata(owner->sysmmu);

        spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data)) {
                if (!IS_ERR(data->clk_master))
                        clk_enable(data->clk_master);
                if (sysmmu_block(data->sfrbase)) {
                        __sysmmu_tlb_invalidate(data->sfrbase);
                        sysmmu_unblock(data->sfrbase);
                }
                if (!IS_ERR(data->clk_master))
                        clk_disable(data->clk_master);
        } else {
                dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
        }
        spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
        int irq, ret;
        struct device *dev = &pdev->dev;
        struct sysmmu_drvdata *data;
        struct resource *res;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->sfrbase = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->sfrbase))
                return PTR_ERR(data->sfrbase);

        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                dev_err(dev, "Unable to find IRQ resource\n");
                return irq;
        }

        ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
                                dev_name(dev), data);
        if (ret) {
                dev_err(dev, "Unable to register handler of irq %d\n", irq);
                return ret;
        }

        data->clk = devm_clk_get(dev, "sysmmu");
        if (IS_ERR(data->clk)) {
                dev_err(dev, "Failed to get clock!\n");
                return PTR_ERR(data->clk);
        } else {
                ret = clk_prepare(data->clk);
                if (ret) {
                        dev_err(dev, "Failed to prepare clk\n");
                        return ret;
                }
        }

        data->clk_master = devm_clk_get(dev, "master");
        if (!IS_ERR(data->clk_master)) {
                ret = clk_prepare(data->clk_master);
                if (ret) {
                        clk_unprepare(data->clk);
                        dev_err(dev, "Failed to prepare master's clk\n");
                        return ret;
                }
        }

        data->sysmmu = dev;
        spin_lock_init(&data->lock);

        platform_set_drvdata(pdev, data);

        pm_runtime_enable(dev);

        return 0;
}

static const struct of_device_id sysmmu_of_match[] __initconst = {
        { .compatible = "samsung,exynos-sysmmu", },
        { },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
        .probe = exynos_sysmmu_probe,
        .driver = {
                .name = "exynos-sysmmu",
                .of_match_table = sysmmu_of_match,
        }
};

static inline void pgtable_flush(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart),
                                virt_to_phys(vaend));
}
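
/*
 * The System MMU walks the page tables directly from memory, so every
 * update must be pushed past the CPU caches: dmac_flush_range() cleans the
 * inner (CPU) cache by virtual address and outer_flush_range() cleans the
 * outer cache (e.g. a PL310) by physical address.
 */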

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
        struct exynos_iommu_domain *exynos_domain;
        int i;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
        if (!exynos_domain)
                return NULL;

        exynos_domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!exynos_domain->pgtable)
                goto err_pgtable;

        exynos_domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!exynos_domain->lv2entcnt)
                goto err_counter;

        /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
        for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
                exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
                exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
                exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
                exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
                exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
                exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
                exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
                exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
        }

        pgtable_flush(exynos_domain->pgtable, exynos_domain->pgtable + NUM_LV1ENTRIES);

        spin_lock_init(&exynos_domain->lock);
        spin_lock_init(&exynos_domain->pgtablelock);
        INIT_LIST_HEAD(&exynos_domain->clients);

        exynos_domain->domain.geometry.aperture_start = 0;
        exynos_domain->domain.geometry.aperture_end = ~0UL;
        exynos_domain->domain.geometry.force_aperture = true;

        return &exynos_domain->domain;

err_counter:
        free_pages((unsigned long)exynos_domain->pgtable, 2);
err_pgtable:
        kfree(exynos_domain);
        return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *domain)
{
        struct exynos_iommu_domain *priv = to_exynos_domain(domain);
        struct exynos_iommu_owner *owner;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&priv->clients));

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry(owner, &priv->clients, client) {
                while (!exynos_sysmmu_disable(owner->dev))
                        ; /* until System MMU is actually disabled */
        }

        while (!list_empty(&priv->clients))
                list_del_init(priv->clients.next);

        spin_unlock_irqrestore(&priv->lock, flags);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(priv->pgtable + i))
                        kmem_cache_free(lv2table_kmem_cache,
                                phys_to_virt(lv2table_base(priv->pgtable + i)));

        free_pages((unsigned long)priv->pgtable, 2);
        free_pages((unsigned long)priv->lv2entcnt, 1);
        kfree(priv);
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
                                      struct device *dev)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *priv = to_exynos_domain(domain);
        phys_addr_t pagetable = virt_to_phys(priv->pgtable);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->lock, flags);

        ret = __exynos_sysmmu_enable(dev, pagetable, domain);
        if (ret == 0) {
                list_add_tail(&owner->client, &priv->clients);
                owner->domain = domain;
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ret < 0) {
                dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
                        __func__, &pagetable);
                return ret;
        }

        dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
                __func__, &pagetable, (ret == 0) ? "" : ", again");

        return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
                                       struct device *dev)
{
        struct exynos_iommu_owner *owner;
        struct exynos_iommu_domain *priv = to_exynos_domain(domain);
        phys_addr_t pagetable = virt_to_phys(priv->pgtable);
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry(owner, &priv->clients, client) {
                if (owner == dev->archdata.iommu) {
                        if (exynos_sysmmu_disable(dev)) {
                                list_del_init(&owner->client);
                                owner->domain = NULL;
                        }
                        break;
                }
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (owner == dev->archdata.iommu)
                dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
                        __func__, &pagetable);
        else
                dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
                sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
                return ERR_PTR(-EADDRINUSE);
        }

        if (lv1ent_fault(sent)) {
                sysmmu_pte_t *pent;
                bool need_flush_flpd_cache = lv1ent_zero(sent);

                pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
                BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
                if (!pent)
                        return ERR_PTR(-ENOMEM);

                *sent = mk_lv1ent_page(virt_to_phys(pent));
                *pgcounter = NUM_LV2ENTRIES;
                pgtable_flush(pent, pent + NUM_LV2ENTRIES);
                pgtable_flush(sent, sent + 1);

                /*
                 * If the pre-fetched SLPD is the faulty SLPD in
                 * zero_l2_table, the FLPD cache may cache the address of
                 * zero_l2_table. This function replaces the zero_l2_table
                 * with a new L2 page table to write valid mappings.
                 * Accessing the valid area may cause a page fault since the
                 * FLPD cache may still cache zero_l2_table for that area
                 * instead of the new L2 page table that holds the area's
                 * mapping information.
                 * Thus any replacement of zero_l2_table with another valid
                 * L2 page table must involve an FLPD cache invalidation on
                 * System MMU v3.3.
                 * FLPD cache invalidation is performed with TLB invalidation
                 * by VPN without blocking. It is safe to invalidate the TLB
                 * without blocking because the target address of the TLB
                 * invalidation is not currently mapped.
                 */
                if (need_flush_flpd_cache) {
                        struct exynos_iommu_owner *owner;

                        spin_lock(&priv->lock);
                        list_for_each_entry(owner, &priv->clients, client)
                                sysmmu_tlb_invalidate_flpdcache(
                                                        owner->dev, iova);
                        spin_unlock(&priv->lock);
                }
        }

        return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *priv,
                          sysmmu_pte_t *sent, sysmmu_iova_t iova,
                          phys_addr_t paddr, short *pgcnt)
{
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                        iova);
                return -EADDRINUSE;
        }

        if (lv1ent_page(sent)) {
                if (*pgcnt != NUM_LV2ENTRIES) {
                        WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                                iova);
                        return -EADDRINUSE;
                }

                kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
                *pgcnt = 0;
        }

        *sent = mk_lv1ent_sect(paddr);

        pgtable_flush(sent, sent + 1);

        spin_lock(&priv->lock);
        if (lv1ent_page_zero(sent)) {
                struct exynos_iommu_owner *owner;
                /*
                 * Flush the FLPD cache on System MMU v3.3, which may have
                 * cached this FLPD entry by speculative prefetch of an SLPD
                 * that has no mapping.
                 */
                list_for_each_entry(owner, &priv->clients, client)
                        sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
        }
        spin_unlock(&priv->lock);

        return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
                       short *pgcnt)
{
        if (size == SPAGE_SIZE) {
                if (WARN_ON(!lv2ent_fault(pent)))
                        return -EADDRINUSE;

                *pent = mk_lv2ent_spage(paddr);
                pgtable_flush(pent, pent + 1);
                *pgcnt -= 1;
        } else { /* size == LPAGE_SIZE */
                int i;

                for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
                        if (WARN_ON(!lv2ent_fault(pent))) {
                                if (i > 0)
                                        memset(pent - i, 0, sizeof(*pent) * i);
                                return -EADDRINUSE;
                        }

                        *pent = mk_lv2ent_lpage(paddr);
                }
                pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
                *pgcnt -= SPAGES_PER_LPAGE;
        }

        return 0;
}
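
/*
 * pgcnt tracks how many of the 256 entries of one lv2 table are still
 * unused: allocation sets it to NUM_LV2ENTRIES, every mapped 4KiB slot
 * decrements it (16 at once for a 64KiB large page) and unmapping
 * increments it again, so lv1set_section() can tell whether the lv2 table
 * it is about to replace is really empty.
 */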

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic that improves address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if a cached fault entry is hit even
 * though that entry has been updated to a valid one after it was cached.
 * To prevent caching of faulty page table entries which may be updated to
 * valid entries later, the virtual memory manager must apply the
 * workaround described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of
 * 128KiB at maximum to prevent misbehavior of System MMU 3.x (workaround
 * for h/w bug).
 *
 * Precisely, the start address of any I/O virtual region must be aligned
 * to the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned by 128KiB.
 */
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
                            phys_addr_t paddr, size_t size, int prot)
{
        struct exynos_iommu_domain *priv = to_exynos_domain(domain);
        sysmmu_pte_t *entry;
        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
        unsigned long flags;
        int ret = -ENOMEM;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        if (size == SECT_SIZE) {
                ret = lv1set_section(priv, entry, iova, paddr,
                                &priv->lv2entcnt[lv1ent_offset(iova)]);
        } else {
                sysmmu_pte_t *pent;

                pent = alloc_lv2entry(priv, entry, iova,
                                &priv->lv2entcnt[lv1ent_offset(iova)]);

                if (IS_ERR(pent))
                        ret = PTR_ERR(pent);
                else
                        ret = lv2set_page(pent, paddr, size,
                                &priv->lv2entcnt[lv1ent_offset(iova)]);
        }

        if (ret)
                pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
                        __func__, ret, size, iova);

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
                                              sysmmu_iova_t iova, size_t size)
{
        struct exynos_iommu_owner *owner;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry(owner, &priv->clients, client)
                sysmmu_tlb_invalidate_entry(owner->dev, iova, size);

        spin_unlock_irqrestore(&priv->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
                                 unsigned long l_iova, size_t size)
{
        struct exynos_iommu_domain *priv = to_exynos_domain(domain);
        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
        sysmmu_pte_t *ent;
        size_t err_pgsize;
        unsigned long flags;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        ent = section_entry(priv->pgtable, iova);

        if (lv1ent_section(ent)) {
                if (WARN_ON(size < SECT_SIZE)) {
                        err_pgsize = SECT_SIZE;
                        goto err;
                }

                /* workaround for h/w bug in System MMU v3.3 */
                *ent = ZERO_LV2LINK;
                pgtable_flush(ent, ent + 1);
                size = SECT_SIZE;
                goto done;
        }

        if (unlikely(lv1ent_fault(ent))) {
                if (size > SECT_SIZE)
                        size = SECT_SIZE;
                goto done;
        }

        /* lv1ent_page(sent) == true here */

        ent = page_entry(ent, iova);

        if (unlikely(lv2ent_fault(ent))) {
                size = SPAGE_SIZE;
                goto done;
        }

        if (lv2ent_small(ent)) {
                *ent = 0;
                size = SPAGE_SIZE;
                pgtable_flush(ent, ent + 1);
                priv->lv2entcnt[lv1ent_offset(iova)] += 1;
                goto done;
        }

        /* lv2ent_large(ent) == true here */
        if (WARN_ON(size < LPAGE_SIZE)) {
                err_pgsize = LPAGE_SIZE;
                goto err;
        }

        memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
        pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

        size = LPAGE_SIZE;
        priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        exynos_iommu_tlb_invalidate_entry(priv, iova, size);

        return size;
err:
        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
                __func__, size, iova, err_pgsize);

        return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
                                             dma_addr_t iova)
{
        struct exynos_iommu_domain *priv = to_exynos_domain(domain);
        sysmmu_pte_t *entry;
        unsigned long flags;
        phys_addr_t phys = 0;

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        if (lv1ent_section(entry)) {
                phys = section_phys(entry) + section_offs(iova);
        } else if (lv1ent_page(entry)) {
                entry = page_entry(entry, iova);

                if (lv2ent_large(entry))
                        phys = lpage_phys(entry) + lpage_offs(iova);
                else if (lv2ent_small(entry))
                        phys = spage_phys(entry) + spage_offs(iova);
        }

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return phys;
}

static int exynos_iommu_add_device(struct device *dev)
{
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);

        if (!group) {
                group = iommu_group_alloc();
                if (IS_ERR(group)) {
                        dev_err(dev, "Failed to allocate IOMMU group\n");
                        return PTR_ERR(group);
                }
        }

        ret = iommu_group_add_device(group, dev);
        iommu_group_put(group);

        return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
        iommu_group_remove_device(dev);
}

static const struct iommu_ops exynos_iommu_ops = {
        .domain_alloc = exynos_iommu_domain_alloc,
        .domain_free = exynos_iommu_domain_free,
        .attach_dev = exynos_iommu_attach_device,
        .detach_dev = exynos_iommu_detach_device,
        .map = exynos_iommu_map,
        .unmap = exynos_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = exynos_iommu_iova_to_phys,
        .add_device = exynos_iommu_add_device,
        .remove_device = exynos_iommu_remove_device,
        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
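
/*
 * pgsize_bitmap advertises the three native page sizes (4KiB, 64KiB and
 * 1MiB); the IOMMU core splits every map/unmap request into chunks of
 * these sizes before calling back into exynos_iommu_map()/unmap().
 */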

static int __init exynos_iommu_init(void)
{
        struct device_node *np;
        int ret;

        np = of_find_matching_node(NULL, sysmmu_of_match);
        if (!np)
                return 0;

        of_node_put(np);

        lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
                                LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
        if (!lv2table_kmem_cache) {
                pr_err("%s: Failed to create kmem cache\n", __func__);
                return -ENOMEM;
        }

        ret = platform_driver_register(&exynos_sysmmu_driver);
        if (ret) {
                pr_err("%s: Failed to register driver\n", __func__);
                goto err_reg_driver;
        }

        zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
        if (zero_lv2_table == NULL) {
                pr_err("%s: Failed to allocate zero level2 page table\n",
                        __func__);
                ret = -ENOMEM;
                goto err_zero_lv2;
        }

        ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
        if (ret) {
                pr_err("%s: Failed to register exynos-iommu driver.\n",
                        __func__);
                goto err_set_iommu;
        }

        return 0;
err_set_iommu:
        kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
        platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
        kmem_cache_destroy(lv2table_kmem_cache);
        return ret;
}
subsys_initcall(exynos_iommu_init);