Commit | Line | Data |
---|---|---|
2a96536e KC |
1 | /* linux/drivers/iommu/exynos_iommu.c |
2 | * | |
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | |
4 | * http://www.samsung.com | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | ||
11 | #ifdef CONFIG_EXYNOS_IOMMU_DEBUG | |
12 | #define DEBUG | |
13 | #endif | |
14 | ||
15 | #include <linux/io.h> | |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/platform_device.h> | |
18 | #include <linux/slab.h> | |
19 | #include <linux/pm_runtime.h> | |
20 | #include <linux/clk.h> | |
21 | #include <linux/err.h> | |
22 | #include <linux/mm.h> | |
23 | #include <linux/iommu.h> | |
24 | #include <linux/errno.h> | |
25 | #include <linux/list.h> | |
26 | #include <linux/memblock.h> | |
27 | #include <linux/export.h> | |
28 | ||
29 | #include <asm/cacheflush.h> | |
30 | #include <asm/pgtable.h> | |
31 | ||
d09d78fc CK |
32 | typedef u32 sysmmu_iova_t; |
33 | typedef u32 sysmmu_pte_t; | |
34 | ||
2a96536e KC |
35 | /* We does not consider super section mapping (16MB) */ |
36 | #define SECT_ORDER 20 | |
37 | #define LPAGE_ORDER 16 | |
38 | #define SPAGE_ORDER 12 | |
39 | ||
40 | #define SECT_SIZE (1 << SECT_ORDER) | |
41 | #define LPAGE_SIZE (1 << LPAGE_ORDER) | |
42 | #define SPAGE_SIZE (1 << SPAGE_ORDER) | |
43 | ||
44 | #define SECT_MASK (~(SECT_SIZE - 1)) | |
45 | #define LPAGE_MASK (~(LPAGE_SIZE - 1)) | |
46 | #define SPAGE_MASK (~(SPAGE_SIZE - 1)) | |
47 | ||
48 | #define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3)) | |
49 | #define lv1ent_page(sent) ((*(sent) & 3) == 1) | |
50 | #define lv1ent_section(sent) ((*(sent) & 3) == 2) | |
51 | ||
52 | #define lv2ent_fault(pent) ((*(pent) & 3) == 0) | |
53 | #define lv2ent_small(pent) ((*(pent) & 2) == 2) | |
54 | #define lv2ent_large(pent) ((*(pent) & 3) == 1) | |
55 | ||
d09d78fc CK |
56 | static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size) |
57 | { | |
58 | return iova & (size - 1); | |
59 | } | |
60 | ||
2a96536e | 61 | #define section_phys(sent) (*(sent) & SECT_MASK) |
d09d78fc | 62 | #define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE) |
2a96536e | 63 | #define lpage_phys(pent) (*(pent) & LPAGE_MASK) |
d09d78fc | 64 | #define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE) |
2a96536e | 65 | #define spage_phys(pent) (*(pent) & SPAGE_MASK) |
d09d78fc | 66 | #define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE) |
2a96536e KC |
67 | |
68 | #define NUM_LV1ENTRIES 4096 | |
d09d78fc | 69 | #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE) |
2a96536e | 70 | |
d09d78fc CK |
71 | static u32 lv1ent_offset(sysmmu_iova_t iova) |
72 | { | |
73 | return iova >> SECT_ORDER; | |
74 | } | |
75 | ||
76 | static u32 lv2ent_offset(sysmmu_iova_t iova) | |
77 | { | |
78 | return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); | |
79 | } | |
80 | ||
81 | #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t)) | |
2a96536e KC |
82 | |
83 | #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE) | |
84 | ||
85 | #define lv2table_base(sent) (*(sent) & 0xFFFFFC00) | |
86 | ||
87 | #define mk_lv1ent_sect(pa) ((pa) | 2) | |
88 | #define mk_lv1ent_page(pa) ((pa) | 1) | |
89 | #define mk_lv2ent_lpage(pa) ((pa) | 1) | |
90 | #define mk_lv2ent_spage(pa) ((pa) | 2) | |
91 | ||
92 | #define CTRL_ENABLE 0x5 | |
93 | #define CTRL_BLOCK 0x7 | |
94 | #define CTRL_DISABLE 0x0 | |
95 | ||
96 | #define REG_MMU_CTRL 0x000 | |
97 | #define REG_MMU_CFG 0x004 | |
98 | #define REG_MMU_STATUS 0x008 | |
99 | #define REG_MMU_FLUSH 0x00C | |
100 | #define REG_MMU_FLUSH_ENTRY 0x010 | |
101 | #define REG_PT_BASE_ADDR 0x014 | |
102 | #define REG_INT_STATUS 0x018 | |
103 | #define REG_INT_CLEAR 0x01C | |
104 | ||
105 | #define REG_PAGE_FAULT_ADDR 0x024 | |
106 | #define REG_AW_FAULT_ADDR 0x028 | |
107 | #define REG_AR_FAULT_ADDR 0x02C | |
108 | #define REG_DEFAULT_SLAVE_ADDR 0x030 | |
109 | ||
110 | #define REG_MMU_VERSION 0x034 | |
111 | ||
112 | #define REG_PB0_SADDR 0x04C | |
113 | #define REG_PB0_EADDR 0x050 | |
114 | #define REG_PB1_SADDR 0x054 | |
115 | #define REG_PB1_EADDR 0x058 | |
116 | ||
6b21a5db CK |
117 | #define has_sysmmu(dev) (dev->archdata.iommu != NULL) |
118 | ||
734c3c73 CK |
119 | static struct kmem_cache *lv2table_kmem_cache; |
120 | ||
d09d78fc | 121 | static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova) |
2a96536e KC |
122 | { |
123 | return pgtable + lv1ent_offset(iova); | |
124 | } | |
125 | ||
d09d78fc | 126 | static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova) |
2a96536e | 127 | { |
d09d78fc | 128 | return (sysmmu_pte_t *)phys_to_virt( |
7222e8db | 129 | lv2table_base(sent)) + lv2ent_offset(iova); |
2a96536e KC |
130 | } |
131 | ||
132 | enum exynos_sysmmu_inttype { | |
133 | SYSMMU_PAGEFAULT, | |
134 | SYSMMU_AR_MULTIHIT, | |
135 | SYSMMU_AW_MULTIHIT, | |
136 | SYSMMU_BUSERROR, | |
137 | SYSMMU_AR_SECURITY, | |
138 | SYSMMU_AR_ACCESS, | |
139 | SYSMMU_AW_SECURITY, | |
140 | SYSMMU_AW_PROTECTION, /* 7 */ | |
141 | SYSMMU_FAULT_UNKNOWN, | |
142 | SYSMMU_FAULTS_NUM | |
143 | }; | |
144 | ||
2a96536e KC |
145 | static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = { |
146 | REG_PAGE_FAULT_ADDR, | |
147 | REG_AR_FAULT_ADDR, | |
148 | REG_AW_FAULT_ADDR, | |
149 | REG_DEFAULT_SLAVE_ADDR, | |
150 | REG_AR_FAULT_ADDR, | |
151 | REG_AR_FAULT_ADDR, | |
152 | REG_AW_FAULT_ADDR, | |
153 | REG_AW_FAULT_ADDR | |
154 | }; | |
155 | ||
156 | static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = { | |
157 | "PAGE FAULT", | |
158 | "AR MULTI-HIT FAULT", | |
159 | "AW MULTI-HIT FAULT", | |
160 | "BUS ERROR", | |
161 | "AR SECURITY PROTECTION FAULT", | |
162 | "AR ACCESS PROTECTION FAULT", | |
163 | "AW SECURITY PROTECTION FAULT", | |
164 | "AW ACCESS PROTECTION FAULT", | |
165 | "UNKNOWN FAULT" | |
166 | }; | |
167 | ||
6b21a5db CK |
168 | /* attached to dev.archdata.iommu of the master device */ |
169 | struct exynos_iommu_owner { | |
170 | struct list_head client; /* entry of exynos_iommu_domain.clients */ | |
171 | struct device *dev; | |
172 | struct device *sysmmu; | |
173 | struct iommu_domain *domain; | |
174 | void *vmm_data; /* IO virtual memory manager's data */ | |
175 | spinlock_t lock; /* Lock to preserve consistency of System MMU */ | |
176 | }; | |
177 | ||
2a96536e KC |
178 | struct exynos_iommu_domain { |
179 | struct list_head clients; /* list of sysmmu_drvdata.node */ | |
d09d78fc | 180 | sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */ |
2a96536e KC |
181 | short *lv2entcnt; /* free lv2 entry counter for each section */ |
182 | spinlock_t lock; /* lock for this structure */ | |
183 | spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */ | |
184 | }; | |
185 | ||
186 | struct sysmmu_drvdata { | |
2a96536e | 187 | struct device *sysmmu; /* System MMU's device descriptor */ |
6b21a5db | 188 | struct device *master; /* Owner of system MMU */ |
7222e8db CK |
189 | void __iomem *sfrbase; |
190 | struct clk *clk; | |
70605870 | 191 | struct clk *clk_master; |
2a96536e | 192 | int activations; |
9d4e7a24 | 193 | spinlock_t lock; |
2a96536e | 194 | struct iommu_domain *domain; |
7222e8db | 195 | phys_addr_t pgtable; |
2a96536e KC |
196 | }; |
197 | ||
198 | static bool set_sysmmu_active(struct sysmmu_drvdata *data) | |
199 | { | |
200 | /* return true if the System MMU was not active previously | |
201 | and it needs to be initialized */ | |
202 | return ++data->activations == 1; | |
203 | } | |
204 | ||
205 | static bool set_sysmmu_inactive(struct sysmmu_drvdata *data) | |
206 | { | |
207 | /* return true if the System MMU is needed to be disabled */ | |
208 | BUG_ON(data->activations < 1); | |
209 | return --data->activations == 0; | |
210 | } | |
211 | ||
212 | static bool is_sysmmu_active(struct sysmmu_drvdata *data) | |
213 | { | |
214 | return data->activations > 0; | |
215 | } | |
216 | ||
/* Re-enable translation after a sysmmu_block() by restoring CTRL_ENABLE. */
static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}
221 | ||
/*
 * Put the System MMU into the blocked state so that the TLB can be
 * safely manipulated.  Writes CTRL_BLOCK and polls bit 0 of MMU_STATUS
 * (bounded busy-wait, 120 reads).  On timeout the MMU is unblocked
 * again and false is returned; true means the MMU is now blocked.
 */
static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;	/* poll budget; no explicit delay between reads */

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	/* re-read: the block may have completed on the last iteration */
	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}
237 | ||
/* Flush the entire TLB of this System MMU instance. */
static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}
242 | ||
243 | static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase, | |
d09d78fc | 244 | sysmmu_iova_t iova, unsigned int num_inv) |
2a96536e | 245 | { |
3ad6b7f3 CK |
246 | unsigned int i; |
247 | for (i = 0; i < num_inv; i++) { | |
248 | __raw_writel((iova & SPAGE_MASK) | 1, | |
249 | sfrbase + REG_MMU_FLUSH_ENTRY); | |
250 | iova += SPAGE_SIZE; | |
251 | } | |
2a96536e KC |
252 | } |
253 | ||
/*
 * Program the page-table base register with the physical address @pgd
 * and flush the whole TLB so no stale translations from the previous
 * table survive.
 */
static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				       phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}
261 | ||
1fab7fa7 CK |
/*
 * Dump diagnostic information for a translation fault: fault type,
 * faulting IOVA, the owning MMU's @name, and the lv1 (and, if present,
 * lv2) page-table entries looked up from @pgtable_base.
 */
static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	/* clamp bogus interrupt types to "unknown" before indexing names */
	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	/* descend only when the lv1 entry points at a lv2 table */
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}
282 | ||
/*
 * Interrupt handler for System MMU faults.  Decodes the fault type from
 * REG_INT_STATUS, reports it (via report_iommu_fault() when a domain is
 * attached), clears the interrupt, and unblocks the MMU.  An unknown
 * fault source or an unhandled fault is fatal (BUG).
 */
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked when interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;	/* stays non-zero unless a handler recovers */

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	/* master clock must run to access the SFR region */
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	/* lowest set bit in INT_STATUS identifies the fault type */
	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault is not occurred by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	/* resume translation; the MMU blocked itself on the fault */
	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}
335 | ||
/*
 * Disable the MMU hardware without touching the reference count:
 * write CTRL_DISABLE, clear the configuration register, then gate the
 * clocks.  The master clock is enabled temporarily for SFR access.
 */
static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
348 | ||
/*
 * Reference-counted disable.  Only when the last reference is dropped
 * is the hardware actually turned off and the pgtable/domain binding
 * cleared.  Returns true if the MMU was really disabled.
 */
static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		/* forget the translation context before powering down */
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}
2a96536e | 374 | |
6b21a5db CK |
/* Program REG_MMU_CFG; currently all configuration bits are left at 0. */
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = 0;

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
}
381 | ||
/*
 * Enable the MMU hardware without touching the reference count.
 * Sequence: ungate clocks, block the MMU, write the configuration,
 * program the page-table base (which also flushes the TLB), then
 * enable translation.  data->pgtable must already be set.
 */
static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	/* data->clk stays enabled while the MMU is active */
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
70605870 | 399 | |
6b21a5db CK |
/*
 * Reference-counted enable, binding the MMU to @pgtable and @domain.
 * Returns 0 when the MMU has just been enabled, 1 when it was already
 * enabled with the same page table, and -EBUSY when it is already
 * enabled with a different page table.
 */
static int __sysmmu_enable(struct sysmmu_drvdata *data,
			phys_addr_t pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		/* already active: only the same page table is acceptable */
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	/* roll back the count taken by set_sysmmu_active() on failure */
	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}
427 | ||
6b21a5db CK |
/* __exynos_sysmmu_enable: Enables System MMU
 *
 * returns -error if an error occurred and System MMU is not enabled,
 * 0 if the System MMU has been just enabled and 1 if System MMU was already
 * enabled before.
 */
static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
				  struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	/* owner->lock serializes enable/disable for this master device */
	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	ret = __sysmmu_enable(data, pgtable, domain);
	if (ret >= 0)
		data->master = dev;	/* record the master for fault reports */

	spin_unlock_irqrestore(&owner->lock, flags);

	return ret;
}
456 | ||
6b21a5db CK |
/*
 * Public enable entry point without an IOMMU domain.  @pgtable must be
 * the physical address of a page table residing in system memory.
 */
int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
{
	BUG_ON(!memblock_is_memory(pgtable));

	return __exynos_sysmmu_enable(dev, pgtable, NULL);
}
463 | ||
/*
 * Drop one enable reference for @dev's System MMU.  Returns true when
 * the hardware was actually disabled (last reference dropped), in
 * which case the recorded master device is also cleared.
 */
static bool exynos_sysmmu_disable(struct device *dev)
{
	unsigned long flags;
	bool disabled = true;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	disabled = __sysmmu_disable(data);
	if (disabled)
		data->master = NULL;

	spin_unlock_irqrestore(&owner->lock, flags);

	return disabled;
}
485 | ||
/*
 * Invalidate the TLB entries covering [@iova, @iova + @size) on @dev's
 * System MMU.  A no-op (with a debug message) if the MMU is currently
 * disabled, since a disabled MMU holds no TLB state.
 */
static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
					size_t size)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int maj;
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		maj = __raw_readl(data->sfrbase + REG_MMU_VERSION);
		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidation
		 * 1MB page: 64 invalidation
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if ((maj >> 28) == 2) /* major version number */
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		/* per-entry flush is only safe while the MMU is blocked */
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
			iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
530 | ||
/*
 * Flush the entire TLB of @dev's System MMU.  Skipped (with a debug
 * message) when the MMU is disabled.
 */
void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);
		/* full flush is only issued while the MMU is blocked */
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
554 | ||
6b21a5db | 555 | static int __init exynos_sysmmu_probe(struct platform_device *pdev) |
2a96536e | 556 | { |
46c16d1e | 557 | int irq, ret; |
7222e8db | 558 | struct device *dev = &pdev->dev; |
2a96536e | 559 | struct sysmmu_drvdata *data; |
7222e8db | 560 | struct resource *res; |
2a96536e | 561 | |
46c16d1e CK |
562 | data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); |
563 | if (!data) | |
564 | return -ENOMEM; | |
2a96536e | 565 | |
7222e8db | 566 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
46c16d1e CK |
567 | data->sfrbase = devm_ioremap_resource(dev, res); |
568 | if (IS_ERR(data->sfrbase)) | |
569 | return PTR_ERR(data->sfrbase); | |
2a96536e | 570 | |
46c16d1e CK |
571 | irq = platform_get_irq(pdev, 0); |
572 | if (irq <= 0) { | |
0bf4e54d | 573 | dev_err(dev, "Unable to find IRQ resource\n"); |
46c16d1e | 574 | return irq; |
2a96536e KC |
575 | } |
576 | ||
46c16d1e | 577 | ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0, |
7222e8db CK |
578 | dev_name(dev), data); |
579 | if (ret) { | |
46c16d1e CK |
580 | dev_err(dev, "Unabled to register handler of irq %d\n", irq); |
581 | return ret; | |
2a96536e KC |
582 | } |
583 | ||
46c16d1e CK |
584 | data->clk = devm_clk_get(dev, "sysmmu"); |
585 | if (IS_ERR(data->clk)) { | |
586 | dev_err(dev, "Failed to get clock!\n"); | |
587 | return PTR_ERR(data->clk); | |
588 | } else { | |
589 | ret = clk_prepare(data->clk); | |
590 | if (ret) { | |
591 | dev_err(dev, "Failed to prepare clk\n"); | |
592 | return ret; | |
593 | } | |
2a96536e KC |
594 | } |
595 | ||
70605870 CK |
596 | data->clk_master = devm_clk_get(dev, "master"); |
597 | if (!IS_ERR(data->clk_master)) { | |
598 | ret = clk_prepare(data->clk_master); | |
599 | if (ret) { | |
600 | clk_unprepare(data->clk); | |
601 | dev_err(dev, "Failed to prepare master's clk\n"); | |
602 | return ret; | |
603 | } | |
604 | } | |
605 | ||
2a96536e | 606 | data->sysmmu = dev; |
9d4e7a24 | 607 | spin_lock_init(&data->lock); |
2a96536e | 608 | |
7222e8db CK |
609 | platform_set_drvdata(pdev, data); |
610 | ||
f4723ec1 | 611 | pm_runtime_enable(dev); |
2a96536e | 612 | |
2a96536e | 613 | return 0; |
2a96536e KC |
614 | } |
615 | ||
6b21a5db CK |
616 | static const struct of_device_id sysmmu_of_match[] __initconst = { |
617 | { .compatible = "samsung,exynos-sysmmu", }, | |
618 | { }, | |
619 | }; | |
620 | ||
621 | static struct platform_driver exynos_sysmmu_driver __refdata = { | |
622 | .probe = exynos_sysmmu_probe, | |
623 | .driver = { | |
2a96536e KC |
624 | .owner = THIS_MODULE, |
625 | .name = "exynos-sysmmu", | |
6b21a5db | 626 | .of_match_table = sysmmu_of_match, |
2a96536e KC |
627 | } |
628 | }; | |
629 | ||
/*
 * Flush page-table memory in [vastart, vaend) from the CPU data cache
 * (L1 via dmac_flush_range, outer cache via outer_flush_range) so the
 * MMU's table walker observes the updated entries.
 */
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}
636 | ||
/*
 * Allocate and initialize an exynos_iommu_domain: a 16KiB (order-2)
 * zeroed lv1 page table, an 8KiB (order-1) array of free-entry
 * counters, the two spinlocks, and the clients list.  Publishes the
 * result in domain->priv.  Returns 0 or -ENOMEM.
 */
static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* order-2: 4096 lv1 entries * 4 bytes = 16KiB */
	priv->pgtable = (sysmmu_pte_t *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	/* one short counter per lv1 entry */
	priv->lv2entcnt = (short *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 1);
	if (!priv->lv2entcnt)
		goto err_counter;

	/* make the zeroed table visible to the MMU's table walker */
	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = ~0UL;
	domain->geometry.force_aperture = true;

	domain->priv = priv;
	return 0;

err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}
674 | ||
/*
 * Tear down a domain: force-disable every still-attached client's MMU,
 * empty the clients list, free every allocated lv2 table, then release
 * the lv1 table, the counter pages, and the domain structure itself.
 */
static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct exynos_iommu_owner *owner;
	unsigned long flags;
	int i;

	/* destroying a domain with live clients indicates a caller bug */
	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		while (!exynos_sysmmu_disable(owner->dev))
			; /* until System MMU is actually disabled */
	}

	while (!list_empty(&priv->clients))
		list_del_init(priv->clients.next);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* free every lv2 table still referenced by a lv1 page entry */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(domain->priv);
	domain->priv = NULL;
}
706 | ||
/*
 * IOMMU API attach callback: enable @dev's System MMU with this
 * domain's page table.  On a fresh enable (ret == 0) the owner is
 * added to the domain's client list; ret == 1 means it was already
 * attached with the same table.  Negative return is an error.
 */
static int exynos_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(dev, pagetable, domain);
	if (ret == 0) {
		list_add_tail(&owner->client, &priv->clients);
		owner->domain = domain;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
					__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}
737 | ||
/*
 * IOMMU API detach callback: find @dev's owner in this domain's client
 * list and, if its System MMU is fully disabled by this call, remove
 * it from the list and clear its domain pointer.
 */
static void exynos_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct exynos_iommu_owner *owner;
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		if (owner == dev->archdata.iommu) {
			/* only detach if no enable references remain */
			if (exynos_sysmmu_disable(dev)) {
				list_del_init(&owner->client);
				owner->domain = NULL;
			}
			break;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* owner still equals dev's archdata only if the entry was found */
	if (owner == dev->archdata.iommu)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}
766 | ||
d09d78fc | 767 | static sysmmu_pte_t *alloc_lv2entry(sysmmu_pte_t *sent, sysmmu_iova_t iova, |
2a96536e KC |
768 | short *pgcounter) |
769 | { | |
61128f08 | 770 | if (lv1ent_section(sent)) { |
d09d78fc | 771 | WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova); |
61128f08 CK |
772 | return ERR_PTR(-EADDRINUSE); |
773 | } | |
774 | ||
2a96536e | 775 | if (lv1ent_fault(sent)) { |
d09d78fc | 776 | sysmmu_pte_t *pent; |
2a96536e | 777 | |
734c3c73 | 778 | pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC); |
d09d78fc | 779 | BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1)); |
2a96536e | 780 | if (!pent) |
61128f08 | 781 | return ERR_PTR(-ENOMEM); |
2a96536e | 782 | |
7222e8db | 783 | *sent = mk_lv1ent_page(virt_to_phys(pent)); |
2a96536e KC |
784 | *pgcounter = NUM_LV2ENTRIES; |
785 | pgtable_flush(pent, pent + NUM_LV2ENTRIES); | |
786 | pgtable_flush(sent, sent + 1); | |
787 | } | |
788 | ||
789 | return page_entry(sent, iova); | |
790 | } | |
791 | ||
/*
 * Install a 1MiB section mapping of @paddr at the lv1 entry @sent.
 * Fails with -EADDRINUSE if the entry already maps a section, or if it
 * points at a lv2 table with mappings in use (*pgcnt != NUM_LV2ENTRIES).
 * An entirely-unused lv2 table is freed and replaced by the section.
 */
static int lv1set_section(sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		/* the lv2 table is completely free: reclaim it */
		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	return 0;
}
818 | ||
/*
 * Install a small-page (4KiB) or large-page (64KiB) mapping of @paddr
 * at the lv2 entry @pent, decrementing the free-entry counter @pgcnt
 * accordingly.  A large page is written as SPAGES_PER_LPAGE (16)
 * identical consecutive entries, as the table format requires; on a
 * collision midway, the already-written entries are rolled back.
 * Returns 0 or -EADDRINUSE.
 */
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				/* undo the partial replication */
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			/* all 16 entries carry the same lpage descriptor */
			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
846 | ||
/*
 * IOMMU API map callback.  Maps @size bytes (SECT_SIZE, LPAGE_SIZE or
 * SPAGE_SIZE) of @paddr at @l_iova under priv->pgtablelock.  Section
 * mappings go directly into the lv1 entry; smaller sizes go through
 * alloc_lv2entry()/lv2set_page().  @prot is currently unused.
 * Returns 0 on success or a negative errno.
 */
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = domain->priv;
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(entry, iova, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}
886 | ||
/*
 * Unmap whatever is mapped at IO virtual address @l_iova in @domain.
 *
 * The actual unmapped size is determined by the existing page table entry
 * (section, large page or small page), not by @size; @size only has to be
 * at least as large as the mapping found.  Returns the number of bytes
 * unmapped, or 0 if @size was smaller than the existing mapping.
 */
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct exynos_iommu_owner *owner;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		/* A 1MiB section cannot be partially unmapped. */
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* Clear the PTE and flush it so the System MMU sees it. */
		*ent = 0;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		/* Nothing mapped here; report at most a section as done. */
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	/* Descend into the level-2 table. */
	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		/* No level-2 mapping either; treat as an unmapped small page. */
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		/* One more free slot in this level-2 table. */
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv1ent_large(ent) == true here */
	/* A 64KiB large page cannot be partially unmapped. */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	/* A large page occupies SPAGES_PER_LPAGE consecutive level-2 entries. */
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	/*
	 * Invalidate the stale TLB entry in every System MMU attached to
	 * this domain; clients are protected by priv->lock, not pgtablelock.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(owner, &priv->clients, client)
		sysmmu_tlb_invalidate_entry(owner->dev, iova, size);
	spin_unlock_irqrestore(&priv->lock, flags);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}
966 | ||
967 | static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain, | |
bb5547ac | 968 | dma_addr_t iova) |
2a96536e KC |
969 | { |
970 | struct exynos_iommu_domain *priv = domain->priv; | |
d09d78fc | 971 | sysmmu_pte_t *entry; |
2a96536e KC |
972 | unsigned long flags; |
973 | phys_addr_t phys = 0; | |
974 | ||
975 | spin_lock_irqsave(&priv->pgtablelock, flags); | |
976 | ||
977 | entry = section_entry(priv->pgtable, iova); | |
978 | ||
979 | if (lv1ent_section(entry)) { | |
980 | phys = section_phys(entry) + section_offs(iova); | |
981 | } else if (lv1ent_page(entry)) { | |
982 | entry = page_entry(entry, iova); | |
983 | ||
984 | if (lv2ent_large(entry)) | |
985 | phys = lpage_phys(entry) + lpage_offs(iova); | |
986 | else if (lv2ent_small(entry)) | |
987 | phys = spage_phys(entry) + spage_offs(iova); | |
988 | } | |
989 | ||
990 | spin_unlock_irqrestore(&priv->pgtablelock, flags); | |
991 | ||
992 | return phys; | |
993 | } | |
994 | ||
bf4a1c92 AM |
995 | static int exynos_iommu_add_device(struct device *dev) |
996 | { | |
997 | struct iommu_group *group; | |
998 | int ret; | |
999 | ||
1000 | group = iommu_group_get(dev); | |
1001 | ||
1002 | if (!group) { | |
1003 | group = iommu_group_alloc(); | |
1004 | if (IS_ERR(group)) { | |
1005 | dev_err(dev, "Failed to allocate IOMMU group\n"); | |
1006 | return PTR_ERR(group); | |
1007 | } | |
1008 | } | |
1009 | ||
1010 | ret = iommu_group_add_device(group, dev); | |
1011 | iommu_group_put(group); | |
1012 | ||
1013 | return ret; | |
1014 | } | |
1015 | ||
/* IOMMU core callback: remove @dev from its IOMMU group on teardown. */
static void exynos_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
1020 | ||
2a96536e KC |
1021 | static struct iommu_ops exynos_iommu_ops = { |
1022 | .domain_init = &exynos_iommu_domain_init, | |
1023 | .domain_destroy = &exynos_iommu_domain_destroy, | |
1024 | .attach_dev = &exynos_iommu_attach_device, | |
1025 | .detach_dev = &exynos_iommu_detach_device, | |
1026 | .map = &exynos_iommu_map, | |
1027 | .unmap = &exynos_iommu_unmap, | |
1028 | .iova_to_phys = &exynos_iommu_iova_to_phys, | |
bf4a1c92 AM |
1029 | .add_device = &exynos_iommu_add_device, |
1030 | .remove_device = &exynos_iommu_remove_device, | |
2a96536e KC |
1031 | .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, |
1032 | }; | |
1033 | ||
/*
 * Module init: create the kmem cache used for level-2 page tables,
 * register the System MMU platform driver, then install exynos_iommu_ops
 * on the platform bus.  Each step is undone in reverse order on failure
 * via the goto-cleanup chain below.
 */
static int __init exynos_iommu_init(void)
{
	int ret;

	/* Level-2 tables must be naturally aligned; hence align == size. */
	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
		LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
/* Run before device drivers that depend on the IOMMU probe. */
subsys_initcall(exynos_iommu_init);