iommu/exynos: Change rwlock to spinlock
/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

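/*
 * The low two bits of a page table entry encode its type.
 * Lv1: 0 or 3 = fault, 1 = pointer to an lv2 table, 2 = 1MiB section.
 * Lv2: 0 = fault, 1 = 64KiB large page, 2 or 3 = 4KiB small page.
 */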
#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)

#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

static struct kmem_cache *lv2table_kmem_cache;

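/*
 * section_entry() returns the lv1 entry covering @iova in @pgtable;
 * page_entry() follows an lv1 page-table entry to the lv2 entry for @iova.
 */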
static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
	return pgtable + lv1ent_offset(iova);
}

static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
	return (unsigned long *)phys_to_virt(
			lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	unsigned long *pgtable; /* lv1 page table, 16KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};

struct sysmmu_drvdata {
	struct list_head node; /* entry of exynos_iommu_domain.clients */
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *dev;	/* Owner of system MMU */
	void __iomem *sfrbase;
	struct clk *clk;
	struct clk *clk_master;
	int activations;
	spinlock_t lock;
	struct iommu_domain *domain;
	phys_addr_t pgtable;
};

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

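/*
 * Stall the System MMU before touching its TLB: CTRL_BLOCK keeps translation
 * enabled but blocks new requests.  The status register is polled (up to 120
 * reads) until the block takes effect; on timeout the MMU is unblocked again
 * and false is returned.
 */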
static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				unsigned long iova, unsigned int num_inv)
{
	unsigned int i;
	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				       unsigned long pgd)
{
	__raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, unsigned long fault_addr)
{
	unsigned long *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#lx by %s(Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: 0x%lx\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: 0x%lx\n", *ent);
	}
}

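/*
 * Interrupt handler: read the fault type from REG_INT_STATUS and the faulting
 * address from the matching fault-address register, report the fault, then
 * clear the interrupt and unblock the MMU.  An unrecovered fault is fatal.
 */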
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* The System MMU is blocked while its interrupt is serviced. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	unsigned long addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault was not raised by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned long base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->dev, addr, itype);
	}

	/* the fault was not recovered by the fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

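/*
 * Drop one activation reference.  The hardware is actually disabled (and the
 * page table base forgotten) only when the last reference goes away; returns
 * true if this call really disabled the MMU.
 */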
static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;
	bool disabled = false;

	spin_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_inactive(data))
		goto finish;

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	disabled = true;
	data->pgtable = 0;
	data->domain = NULL;
finish:
	spin_unlock_irqrestore(&data->lock, flags);

	if (disabled)
		dev_dbg(data->sysmmu, "Disabled\n");
	else
		dev_dbg(data->sysmmu, "%d times left to be disabled\n",
					data->activations);

	return disabled;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * Returns -error if an error occurred and the System MMU is not enabled,
 * 0 if the System MMU has just been enabled, and 1 if it was already
 * enabled before the call.
 */
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
			unsigned long pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_active(data)) {
		if (WARN_ON(pgtable != data->pgtable)) {
			ret = -EBUSY;
			set_sysmmu_inactive(data);
		} else {
			ret = 1;
		}

		dev_dbg(data->sysmmu, "Already enabled\n");
		goto finish;
	}

	data->pgtable = pgtable;

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__sysmmu_set_ptbase(data->sfrbase, pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	data->domain = domain;

	dev_dbg(data->sysmmu, "Enabled\n");
finish:
	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	int ret;

	BUG_ON(!memblock_is_memory(pgtable));

	ret = pm_runtime_get_sync(data->sysmmu);
	if (ret < 0) {
		dev_dbg(data->sysmmu, "Failed to enable\n");
		return ret;
	}

	ret = __exynos_sysmmu_enable(data, pgtable, NULL);
	if (WARN_ON(ret < 0)) {
		pm_runtime_put(data->sysmmu);
		dev_err(data->sysmmu, "Already enabled with page table %#x\n",
			data->pgtable);
	} else {
		data->dev = dev;
	}

	return ret;
}

static bool exynos_sysmmu_disable(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	bool disabled;

	disabled = __exynos_sysmmu_disable(data);
	pm_runtime_put(data->sysmmu);

	return disabled;
}

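/*
 * Invalidate only the TLB entries covering [iova, iova + size).  When the
 * major version read from REG_MMU_VERSION is 2, one FLUSH_ENTRY write is
 * issued per 4KiB page (capped at 64), since a large mapping may be cached
 * in several sets of the set-associative TLB; otherwise a single entry
 * flush suffices.
 */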
static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova,
					size_t size)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	spin_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		unsigned int maj;
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		maj = __raw_readl(data->sfrbase + REG_MMU_VERSION);
		/*
		 * L2TLB invalidations required:
		 *  4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 *  1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any set; a 64KB page can be
		 * in one of 16 consecutive sets.
		 */
		if ((maj >> 28) == 2) /* major version number */
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(data->sysmmu, "Disabled. Skipping TLB invalidation.\n");
	}

	spin_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	spin_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(data->sysmmu, "Disabled. Skipping TLB invalidation.\n");
	}

	spin_unlock_irqrestore(&data->lock, flags);
}

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_dbg(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler for irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);
	INIT_LIST_HEAD(&data->node);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

static struct platform_driver exynos_sysmmu_driver = {
	.probe = exynos_sysmmu_probe,
	.driver = {
		.owner = THIS_MODULE,
		.name = "exynos-sysmmu",
	}
};

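/*
 * The System MMU walks page tables in memory, so updated entries must be
 * made visible beyond the CPU caches: flush the dcache range and the outer
 * cache for the modified entries.
 */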
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

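/*
 * The lv1 table holds 4096 4-byte entries (16KB, hence order-2 pages) and
 * lv2entcnt keeps one 16-bit free-entry counter per lv1 entry (8KB, hence
 * order-1 pages).
 */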
static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pgtable = (unsigned long *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (short *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 1);
	if (!priv->lv2entcnt)
		goto err_counter;

	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = ~0UL;
	domain->geometry.force_aperture = true;

	domain->priv = priv;
	return 0;

err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}

static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(data, &priv->clients, node) {
		while (!exynos_sysmmu_disable(data->dev))
			; /* until System MMU is actually disabled */
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(domain->priv);
	domain->priv = NULL;
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	ret = pm_runtime_get_sync(data->sysmmu);
	if (ret < 0)
		return ret;

	ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(data, pagetable, domain);

	if (ret == 0) {
		/* 'data->node' must not already appear in priv->clients */
		BUG_ON(!list_empty(&data->node));
		data->dev = dev;
		list_add_tail(&data->node, &priv->clients);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		pm_runtime_put(data->sysmmu);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct exynos_iommu_domain *priv = domain->priv;
	struct list_head *pos;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each(pos, &priv->clients) {
		if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
			found = true;
			break;
		}
	}

	if (!found)
		goto finish;

	if (__exynos_sysmmu_disable(data)) {
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		list_del_init(&data->node);

	} else {
		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %pa delayed",
			__func__, &pagetable);
	}

finish:
	spin_unlock_irqrestore(&priv->lock, flags);

	if (found)
		pm_runtime_put(data->sysmmu);
}

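/*
 * Return the lv2 entry for @iova, allocating a zeroed lv2 table on demand
 * when the lv1 entry is still a fault entry.  Returns ERR_PTR(-EADDRINUSE)
 * if the lv1 entry already maps a 1MiB section.
 */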
static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
					short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08lx mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		unsigned long *pent;

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);
	}

	return page_entry(sent, iova);
}

static int lv1set_section(unsigned long *sent, unsigned long iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	return 0;
}

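/*
 * Install either one 4KiB entry or, for a 64KiB mapping, the same large-page
 * descriptor replicated into all sixteen consecutive lv2 slots it spans.
 * @pgcnt tracks the remaining free entries of the lv2 table; a partially
 * written large mapping is rolled back on collision.
 */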
static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (!lv2ent_fault(pent)) {
			WARN(1, "Trying mapping on 4KiB where mapping exists");
			return -EADDRINUSE;
		}

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (!lv2ent_fault(pent)) {
				WARN(1,
				"Trying mapping on 64KiB where mapping exists");
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

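/*
 * Only the sizes advertised in pgsize_bitmap reach this callback: 1MiB
 * sections are installed directly in the lv1 table, while 64KiB and 4KiB
 * pages go through an lv2 table that is allocated on first use.
 */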
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(entry, iova, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		unsigned long *pent;

		pent = alloc_lv2entry(entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
			__func__, iova, size);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}

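/*
 * The mapped size is recovered from the entry type found at @iova; once the
 * entries are cleared, the TLB of every attached System MMU is invalidated
 * for the unmapped range.
 */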
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					unsigned long iova, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	unsigned long *ent;
	size_t err_pgsize;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (size < SECT_SIZE) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		*ent = 0;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (size < LPAGE_SIZE) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(data, &priv->clients, node)
		sysmmu_tlb_invalidate_entry(data->dev, iova, size);
	spin_unlock_irqrestore(&priv->lock, flags);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	WARN(1,
	"%s: Failed: size(%#x) @ %#08lx is smaller than page size %#x\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static struct iommu_ops exynos_iommu_ops = {
	.domain_init = &exynos_iommu_domain_init,
	.domain_destroy = &exynos_iommu_domain_destroy,
	.attach_dev = &exynos_iommu_attach_device,
	.detach_dev = &exynos_iommu_detach_device,
	.map = &exynos_iommu_map,
	.unmap = &exynos_iommu_unmap,
	.iova_to_phys = &exynos_iommu_iova_to_phys,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
		LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);