Commit | Line | Data |
---|---|---|
2a96536e KC |
1 | /* linux/drivers/iommu/exynos-iommu.c
2 | * | |
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | |
4 | * http://www.samsung.com | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | ||
11 | #ifdef CONFIG_EXYNOS_IOMMU_DEBUG | |
12 | #define DEBUG | |
13 | #endif | |
14 | ||
15 | #include <linux/io.h> | |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/platform_device.h> | |
18 | #include <linux/slab.h> | |
19 | #include <linux/pm_runtime.h> | |
20 | #include <linux/clk.h> | |
21 | #include <linux/err.h> | |
22 | #include <linux/mm.h> | |
23 | #include <linux/iommu.h> | |
24 | #include <linux/errno.h> | |
25 | #include <linux/list.h> | |
26 | #include <linux/memblock.h> | |
27 | #include <linux/export.h> | |
28 | ||
29 | #include <asm/cacheflush.h> | |
30 | #include <asm/pgtable.h> | |
31 | ||
d09d78fc CK |
32 | typedef u32 sysmmu_iova_t; |
33 | typedef u32 sysmmu_pte_t; | |
34 | ||
f171abab | 35 | /* We do not consider super section mapping (16MB) */ |
2a96536e KC |
36 | #define SECT_ORDER 20 |
37 | #define LPAGE_ORDER 16 | |
38 | #define SPAGE_ORDER 12 | |
39 | ||
40 | #define SECT_SIZE (1 << SECT_ORDER) | |
41 | #define LPAGE_SIZE (1 << LPAGE_ORDER) | |
42 | #define SPAGE_SIZE (1 << SPAGE_ORDER) | |
43 | ||
44 | #define SECT_MASK (~(SECT_SIZE - 1)) | |
45 | #define LPAGE_MASK (~(LPAGE_SIZE - 1)) | |
46 | #define SPAGE_MASK (~(SPAGE_SIZE - 1)) | |
47 | ||
66a7ed84 CK |
48 | #define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \ |
49 | ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3)) | |
50 | #define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK) | |
51 | #define lv1ent_page_zero(sent) ((*(sent) & 3) == 1) | |
52 | #define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \ | |
53 | ((*(sent) & 3) == 1)) | |
2a96536e KC |
54 | #define lv1ent_section(sent) ((*(sent) & 3) == 2) |
55 | ||
56 | #define lv2ent_fault(pent) ((*(pent) & 3) == 0) | |
57 | #define lv2ent_small(pent) ((*(pent) & 2) == 2) | |
58 | #define lv2ent_large(pent) ((*(pent) & 3) == 1) | |
59 | ||
d09d78fc CK |
60 | static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size) |
61 | { | |
62 | return iova & (size - 1); | |
63 | } | |
64 | ||
2a96536e | 65 | #define section_phys(sent) (*(sent) & SECT_MASK) |
d09d78fc | 66 | #define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE) |
2a96536e | 67 | #define lpage_phys(pent) (*(pent) & LPAGE_MASK) |
d09d78fc | 68 | #define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE) |
2a96536e | 69 | #define spage_phys(pent) (*(pent) & SPAGE_MASK) |
d09d78fc | 70 | #define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE) |
2a96536e KC |
71 | |
72 | #define NUM_LV1ENTRIES 4096 | |
d09d78fc | 73 | #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE) |
2a96536e | 74 | |
d09d78fc CK |
75 | static u32 lv1ent_offset(sysmmu_iova_t iova) |
76 | { | |
77 | return iova >> SECT_ORDER; | |
78 | } | |
79 | ||
80 | static u32 lv2ent_offset(sysmmu_iova_t iova) | |
81 | { | |
82 | return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); | |
83 | } | |
84 | ||
85 | #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t)) | |
2a96536e KC |
86 | |
87 | #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE) | |
88 | ||
89 | #define lv2table_base(sent) (*(sent) & 0xFFFFFC00) | |
90 | ||
91 | #define mk_lv1ent_sect(pa) ((pa) | 2) | |
92 | #define mk_lv1ent_page(pa) ((pa) | 1) | |
93 | #define mk_lv2ent_lpage(pa) ((pa) | 1) | |
94 | #define mk_lv2ent_spage(pa) ((pa) | 2) | |
95 | ||
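The definitions above describe the two-level page table this driver maintains: 4096 first-level entries covering 1MiB each, and 256 second-level entries covering 4KiB each. A standalone sketch (not driver code) of how an IOVA decomposes under these definitions:

/* Standalone sketch: split an IOVA the same way lv1ent_offset(),
 * lv2ent_offset() and the *_offs() helpers above do. Values are examples.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t iova = 0x12345678;

	uint32_t lv1 = iova >> 20;		/* lv1ent_offset(): section index, here 0x123 */
	uint32_t lv2 = (iova >> 12) & 0xFF;	/* lv2ent_offset(): page index in the section, here 0x45 */
	uint32_t off = iova & 0xFFF;		/* spage_offs(): byte offset in the 4KiB page, here 0x678 */

	assert(lv1 == 0x123 && lv2 == 0x45 && off == 0x678);

	/*
	 * The entry type lives in the low bits, per the lv1ent_xxx()/lv2ent_xxx()
	 * macros above: lv1 ...10 = 1MiB section, ...01 = link to an lv2 table;
	 * lv2 ...10 = 4KiB small page, ...01 = 64KiB large page.
	 */
	return 0;
}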
96 | #define CTRL_ENABLE 0x5 | |
97 | #define CTRL_BLOCK 0x7 | |
98 | #define CTRL_DISABLE 0x0 | |
99 | ||
eeb5184b CK |
100 | #define CFG_LRU 0x1 |
101 | #define CFG_QOS(n) ((n & 0xF) << 7) | |
102 | #define CFG_MASK 0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */ | |
103 | #define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */ | |
104 | #define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */ | |
105 | #define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */ | |
106 | ||
2a96536e KC |
107 | #define REG_MMU_CTRL 0x000 |
108 | #define REG_MMU_CFG 0x004 | |
109 | #define REG_MMU_STATUS 0x008 | |
110 | #define REG_MMU_FLUSH 0x00C | |
111 | #define REG_MMU_FLUSH_ENTRY 0x010 | |
112 | #define REG_PT_BASE_ADDR 0x014 | |
113 | #define REG_INT_STATUS 0x018 | |
114 | #define REG_INT_CLEAR 0x01C | |
115 | ||
116 | #define REG_PAGE_FAULT_ADDR 0x024 | |
117 | #define REG_AW_FAULT_ADDR 0x028 | |
118 | #define REG_AR_FAULT_ADDR 0x02C | |
119 | #define REG_DEFAULT_SLAVE_ADDR 0x030 | |
120 | ||
121 | #define REG_MMU_VERSION 0x034 | |
122 | ||
eeb5184b CK |
123 | #define MMU_MAJ_VER(val) ((val) >> 7) |
124 | #define MMU_MIN_VER(val) ((val) & 0x7F) | |
125 | #define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */ | |
126 | ||
127 | #define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F)) | |
128 | ||
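A worked check (standalone sketch, not driver code) of how the version macros above fit together: REG_MMU_VERSION carries the version in its top 11 bits, packed as a 4-bit major and 7-bit minor number.

#include <assert.h>

int main(void)
{
	unsigned int reg = 0x183u << 21;		  /* REG_MMU_VERSION value of a v3.3 SysMMU */
	unsigned int ver = (reg >> 21) & ((1 << 11) - 1); /* MMU_RAW_VER(reg) == 0x183 */

	assert(ver == (((3 & 0xF) << 7) | (3 & 0x7F)));	  /* MAKE_MMU_VER(3, 3) */
	assert((ver >> 7) == 3);			  /* MMU_MAJ_VER(ver)  */
	assert((ver & 0x7F) == 3);			  /* MMU_MIN_VER(ver)  */
	return 0;
}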
2a96536e KC |
129 | #define REG_PB0_SADDR 0x04C |
130 | #define REG_PB0_EADDR 0x050 | |
131 | #define REG_PB1_SADDR 0x054 | |
132 | #define REG_PB1_EADDR 0x058 | |
133 | ||
6b21a5db CK |
134 | #define has_sysmmu(dev) (dev->archdata.iommu != NULL) |
135 | ||
734c3c73 | 136 | static struct kmem_cache *lv2table_kmem_cache; |
66a7ed84 CK |
137 | static sysmmu_pte_t *zero_lv2_table; |
138 | #define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table)) | |
734c3c73 | 139 | |
d09d78fc | 140 | static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova) |
2a96536e KC |
141 | { |
142 | return pgtable + lv1ent_offset(iova); | |
143 | } | |
144 | ||
d09d78fc | 145 | static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova) |
2a96536e | 146 | { |
d09d78fc | 147 | return (sysmmu_pte_t *)phys_to_virt( |
7222e8db | 148 | lv2table_base(sent)) + lv2ent_offset(iova); |
2a96536e KC |
149 | } |
150 | ||
151 | enum exynos_sysmmu_inttype { | |
152 | SYSMMU_PAGEFAULT, | |
153 | SYSMMU_AR_MULTIHIT, | |
154 | SYSMMU_AW_MULTIHIT, | |
155 | SYSMMU_BUSERROR, | |
156 | SYSMMU_AR_SECURITY, | |
157 | SYSMMU_AR_ACCESS, | |
158 | SYSMMU_AW_SECURITY, | |
159 | SYSMMU_AW_PROTECTION, /* 7 */ | |
160 | SYSMMU_FAULT_UNKNOWN, | |
161 | SYSMMU_FAULTS_NUM | |
162 | }; | |
163 | ||
2a96536e KC |
164 | static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = { |
165 | REG_PAGE_FAULT_ADDR, | |
166 | REG_AR_FAULT_ADDR, | |
167 | REG_AW_FAULT_ADDR, | |
168 | REG_DEFAULT_SLAVE_ADDR, | |
169 | REG_AR_FAULT_ADDR, | |
170 | REG_AR_FAULT_ADDR, | |
171 | REG_AW_FAULT_ADDR, | |
172 | REG_AW_FAULT_ADDR | |
173 | }; | |
174 | ||
175 | static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = { | |
176 | "PAGE FAULT", | |
177 | "AR MULTI-HIT FAULT", | |
178 | "AW MULTI-HIT FAULT", | |
179 | "BUS ERROR", | |
180 | "AR SECURITY PROTECTION FAULT", | |
181 | "AR ACCESS PROTECTION FAULT", | |
182 | "AW SECURITY PROTECTION FAULT", | |
183 | "AW ACCESS PROTECTION FAULT", | |
184 | "UNKNOWN FAULT" | |
185 | }; | |
186 | ||
6b21a5db CK |
187 | /* attached to dev.archdata.iommu of the master device */ |
188 | struct exynos_iommu_owner { | |
6b21a5db | 189 | struct device *sysmmu; |
6b21a5db CK |
190 | }; |
191 | ||
2a96536e KC |
192 | struct exynos_iommu_domain { |
193 | struct list_head clients; /* list of sysmmu_drvdata.domain_node */ | |
d09d78fc | 194 | sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */ |
2a96536e KC |
195 | short *lv2entcnt; /* free lv2 entry counter for each section */ |
196 | spinlock_t lock; /* lock for this structure */ | |
197 | spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */ | |
e1fd1eaa | 198 | struct iommu_domain domain; /* generic domain data structure */ |
2a96536e KC |
199 | }; |
200 | ||
201 | struct sysmmu_drvdata { | |
2a96536e | 202 | struct device *sysmmu; /* System MMU's device descriptor */ |
6b21a5db | 203 | struct device *master; /* Owner of system MMU */ |
7222e8db CK |
204 | void __iomem *sfrbase; |
205 | struct clk *clk; | |
70605870 | 206 | struct clk *clk_master; |
2a96536e | 207 | int activations; |
9d4e7a24 | 208 | spinlock_t lock; |
2a96536e | 209 | struct iommu_domain *domain; |
469acebe | 210 | struct list_head domain_node; |
7222e8db | 211 | phys_addr_t pgtable; |
512bd0c6 | 212 | unsigned int version; |
2a96536e KC |
213 | }; |
214 | ||
e1fd1eaa JR |
215 | static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom) |
216 | { | |
217 | return container_of(dom, struct exynos_iommu_domain, domain); | |
218 | } | |
219 | ||
2a96536e KC |
220 | static bool set_sysmmu_active(struct sysmmu_drvdata *data) |
221 | { | |
222 | /* return true if the System MMU was not active previously | |
223 | and it needs to be initialized */ | |
224 | return ++data->activations == 1; | |
225 | } | |
226 | ||
227 | static bool set_sysmmu_inactive(struct sysmmu_drvdata *data) | |
228 | { | |
229 | /* return true if the System MMU needs to be disabled */ | |
230 | BUG_ON(data->activations < 1); | |
231 | return --data->activations == 0; | |
232 | } | |
233 | ||
234 | static bool is_sysmmu_active(struct sysmmu_drvdata *data) | |
235 | { | |
236 | return data->activations > 0; | |
237 | } | |
238 | ||
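The three helpers above implement the activation reference count used by __sysmmu_enable()/__sysmmu_disable() further down: only the first enable programs the hardware and only the last disable turns it off. A standalone sketch (not driver code) of that counting behaviour:

/* Mimics set_sysmmu_active()/set_sysmmu_inactive() on a fake structure. */
#include <assert.h>
#include <stdbool.h>

struct fake_drvdata { int activations; };

static bool fake_set_active(struct fake_drvdata *d)   { return ++d->activations == 1; }
static bool fake_set_inactive(struct fake_drvdata *d) { return --d->activations == 0; }

int main(void)
{
	struct fake_drvdata d = { 0 };

	assert(fake_set_active(&d));	/* first user: hardware must be initialized */
	assert(!fake_set_active(&d));	/* second user: already running */
	assert(!fake_set_inactive(&d));	/* one user left: keep it enabled */
	assert(fake_set_inactive(&d));	/* last user gone: hardware can be disabled */
	return 0;
}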
239 | static void sysmmu_unblock(void __iomem *sfrbase) | |
240 | { | |
241 | __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL); | |
242 | } | |
243 | ||
244 | static bool sysmmu_block(void __iomem *sfrbase) | |
245 | { | |
246 | int i = 120; | |
247 | ||
248 | __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL); | |
249 | while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) | |
250 | --i; | |
251 | ||
252 | if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) { | |
253 | sysmmu_unblock(sfrbase); | |
254 | return false; | |
255 | } | |
256 | ||
257 | return true; | |
258 | } | |
259 | ||
260 | static void __sysmmu_tlb_invalidate(void __iomem *sfrbase) | |
261 | { | |
262 | __raw_writel(0x1, sfrbase + REG_MMU_FLUSH); | |
263 | } | |
264 | ||
265 | static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase, | |
d09d78fc | 266 | sysmmu_iova_t iova, unsigned int num_inv) |
2a96536e | 267 | { |
3ad6b7f3 | 268 | unsigned int i; |
365409db | 269 | |
3ad6b7f3 CK |
270 | for (i = 0; i < num_inv; i++) { |
271 | __raw_writel((iova & SPAGE_MASK) | 1, | |
272 | sfrbase + REG_MMU_FLUSH_ENTRY); | |
273 | iova += SPAGE_SIZE; | |
274 | } | |
2a96536e KC |
275 | } |
276 | ||
277 | static void __sysmmu_set_ptbase(void __iomem *sfrbase, | |
d09d78fc | 278 | phys_addr_t pgd) |
2a96536e | 279 | { |
2a96536e KC |
280 | __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR); |
281 | ||
282 | __sysmmu_tlb_invalidate(sfrbase); | |
283 | } | |
284 | ||
1fab7fa7 CK |
285 | static void show_fault_information(const char *name, |
286 | enum exynos_sysmmu_inttype itype, | |
d09d78fc | 287 | phys_addr_t pgtable_base, sysmmu_iova_t fault_addr) |
2a96536e | 288 | { |
d09d78fc | 289 | sysmmu_pte_t *ent; |
2a96536e KC |
290 | |
291 | if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT)) | |
292 | itype = SYSMMU_FAULT_UNKNOWN; | |
293 | ||
d09d78fc | 294 | pr_err("%s occurred at %#x by %s(Page table base: %pa)\n", |
1fab7fa7 | 295 | sysmmu_fault_name[itype], fault_addr, name, &pgtable_base); |
2a96536e | 296 | |
7222e8db | 297 | ent = section_entry(phys_to_virt(pgtable_base), fault_addr); |
d09d78fc | 298 | pr_err("\tLv1 entry: %#x\n", *ent); |
2a96536e KC |
299 | |
300 | if (lv1ent_page(ent)) { | |
301 | ent = page_entry(ent, fault_addr); | |
d09d78fc | 302 | pr_err("\t Lv2 entry: %#x\n", *ent); |
2a96536e | 303 | } |
2a96536e KC |
304 | } |
305 | ||
306 | static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id) | |
307 | { | |
f171abab | 308 | /* SYSMMU is in blocked state when an interrupt occurs. */ |
2a96536e | 309 | struct sysmmu_drvdata *data = dev_id; |
2a96536e | 310 | enum exynos_sysmmu_inttype itype; |
d09d78fc | 311 | sysmmu_iova_t addr = -1; |
7222e8db | 312 | int ret = -ENOSYS; |
2a96536e | 313 | |
2a96536e KC |
314 | WARN_ON(!is_sysmmu_active(data)); |
315 | ||
9d4e7a24 CK |
316 | spin_lock(&data->lock); |
317 | ||
70605870 CK |
318 | if (!IS_ERR(data->clk_master)) |
319 | clk_enable(data->clk_master); | |
9d4e7a24 | 320 | |
7222e8db CK |
321 | itype = (enum exynos_sysmmu_inttype) |
322 | __ffs(__raw_readl(data->sfrbase + REG_INT_STATUS)); | |
323 | if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN)))) | |
2a96536e | 324 | itype = SYSMMU_FAULT_UNKNOWN; |
7222e8db CK |
325 | else |
326 | addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]); | |
2a96536e | 327 | |
1fab7fa7 CK |
328 | if (itype == SYSMMU_FAULT_UNKNOWN) { |
329 | pr_err("%s: Fault was not caused by System MMU '%s'!\n", | |
330 | __func__, dev_name(data->sysmmu)); | |
331 | pr_err("%s: Please check if IRQ is correctly configured.\n", | |
332 | __func__); | |
333 | BUG(); | |
334 | } else { | |
d09d78fc | 335 | unsigned int base = |
1fab7fa7 CK |
336 | __raw_readl(data->sfrbase + REG_PT_BASE_ADDR); |
337 | show_fault_information(dev_name(data->sysmmu), | |
338 | itype, base, addr); | |
339 | if (data->domain) | |
340 | ret = report_iommu_fault(data->domain, | |
6b21a5db | 341 | data->master, addr, itype); |
2a96536e KC |
342 | } |
343 | ||
1fab7fa7 CK |
344 | /* the fault was not recovered by the fault handler */ |
345 | BUG_ON(ret != 0); | |
2a96536e | 346 | |
1fab7fa7 CK |
347 | __raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR); |
348 | ||
349 | sysmmu_unblock(data->sfrbase); | |
2a96536e | 350 | |
70605870 CK |
351 | if (!IS_ERR(data->clk_master)) |
352 | clk_disable(data->clk_master); | |
353 | ||
9d4e7a24 | 354 | spin_unlock(&data->lock); |
2a96536e KC |
355 | |
356 | return IRQ_HANDLED; | |
357 | } | |
358 | ||
6b21a5db | 359 | static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data) |
2a96536e | 360 | { |
70605870 CK |
361 | if (!IS_ERR(data->clk_master)) |
362 | clk_enable(data->clk_master); | |
363 | ||
7222e8db | 364 | __raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL); |
6b21a5db | 365 | __raw_writel(0, data->sfrbase + REG_MMU_CFG); |
2a96536e | 366 | |
46c16d1e | 367 | clk_disable(data->clk); |
70605870 CK |
368 | if (!IS_ERR(data->clk_master)) |
369 | clk_disable(data->clk_master); | |
2a96536e KC |
370 | } |
371 | ||
6b21a5db | 372 | static bool __sysmmu_disable(struct sysmmu_drvdata *data) |
2a96536e | 373 | { |
6b21a5db | 374 | bool disabled; |
2a96536e KC |
375 | unsigned long flags; |
376 | ||
9d4e7a24 | 377 | spin_lock_irqsave(&data->lock, flags); |
2a96536e | 378 | |
6b21a5db CK |
379 | disabled = set_sysmmu_inactive(data); |
380 | ||
381 | if (disabled) { | |
382 | data->pgtable = 0; | |
383 | data->domain = NULL; | |
384 | ||
385 | __sysmmu_disable_nocount(data); | |
2a96536e | 386 | |
6b21a5db CK |
387 | dev_dbg(data->sysmmu, "Disabled\n"); |
388 | } else { | |
389 | dev_dbg(data->sysmmu, "%d times left to disable\n", | |
390 | data->activations); | |
2a96536e KC |
391 | } |
392 | ||
6b21a5db CK |
393 | spin_unlock_irqrestore(&data->lock, flags); |
394 | ||
395 | return disabled; | |
396 | } | |
2a96536e | 397 | |
6b21a5db CK |
398 | static void __sysmmu_init_config(struct sysmmu_drvdata *data) |
399 | { | |
eeb5184b CK |
400 | unsigned int cfg = CFG_LRU | CFG_QOS(15); |
401 | unsigned int ver; | |
402 | ||
512bd0c6 | 403 | ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION)); |
eeb5184b CK |
404 | if (MMU_MAJ_VER(ver) == 3) { |
405 | if (MMU_MIN_VER(ver) >= 2) { | |
406 | cfg |= CFG_FLPDCACHE; | |
407 | if (MMU_MIN_VER(ver) == 3) { | |
408 | cfg |= CFG_ACGEN; | |
409 | cfg &= ~CFG_LRU; | |
410 | } else { | |
411 | cfg |= CFG_SYSSEL; | |
412 | } | |
413 | } | |
414 | } | |
6b21a5db CK |
415 | |
416 | __raw_writel(cfg, data->sfrbase + REG_MMU_CFG); | |
512bd0c6 | 417 | data->version = ver; |
6b21a5db CK |
418 | } |
419 | ||
420 | static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data) | |
421 | { | |
70605870 CK |
422 | if (!IS_ERR(data->clk_master)) |
423 | clk_enable(data->clk_master); | |
424 | clk_enable(data->clk); | |
425 | ||
6b21a5db CK |
426 | __raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); |
427 | ||
428 | __sysmmu_init_config(data); | |
429 | ||
430 | __sysmmu_set_ptbase(data->sfrbase, data->pgtable); | |
2a96536e | 431 | |
7222e8db CK |
432 | __raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); |
433 | ||
70605870 CK |
434 | if (!IS_ERR(data->clk_master)) |
435 | clk_disable(data->clk_master); | |
6b21a5db | 436 | } |
70605870 | 437 | |
bfa00489 MS |
438 | static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable, |
439 | struct iommu_domain *iommu_domain) | |
6b21a5db CK |
440 | { |
441 | int ret = 0; | |
442 | unsigned long flags; | |
443 | ||
444 | spin_lock_irqsave(&data->lock, flags); | |
445 | if (set_sysmmu_active(data)) { | |
446 | data->pgtable = pgtable; | |
bfa00489 | 447 | data->domain = iommu_domain; |
6b21a5db CK |
448 | |
449 | __sysmmu_enable_nocount(data); | |
450 | ||
451 | dev_dbg(data->sysmmu, "Enabled\n"); | |
452 | } else { | |
453 | ret = (pgtable == data->pgtable) ? 1 : -EBUSY; | |
454 | ||
455 | dev_dbg(data->sysmmu, "already enabled\n"); | |
456 | } | |
457 | ||
458 | if (WARN_ON(ret < 0)) | |
459 | set_sysmmu_inactive(data); /* decrement count */ | |
2a96536e | 460 | |
9d4e7a24 | 461 | spin_unlock_irqrestore(&data->lock, flags); |
2a96536e KC |
462 | |
463 | return ret; | |
464 | } | |
465 | ||
66a7ed84 CK |
466 | static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data, |
467 | sysmmu_iova_t iova) | |
468 | { | |
512bd0c6 | 469 | if (data->version == MAKE_MMU_VER(3, 3)) |
66a7ed84 CK |
470 | __raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY); |
471 | } | |
472 | ||
469acebe | 473 | static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data, |
66a7ed84 CK |
474 | sysmmu_iova_t iova) |
475 | { | |
476 | unsigned long flags; | |
66a7ed84 CK |
477 | |
478 | if (!IS_ERR(data->clk_master)) | |
479 | clk_enable(data->clk_master); | |
480 | ||
481 | spin_lock_irqsave(&data->lock, flags); | |
482 | if (is_sysmmu_active(data)) | |
483 | __sysmmu_tlb_invalidate_flpdcache(data, iova); | |
484 | spin_unlock_irqrestore(&data->lock, flags); | |
485 | ||
486 | if (!IS_ERR(data->clk_master)) | |
487 | clk_disable(data->clk_master); | |
488 | } | |
489 | ||
469acebe MS |
490 | static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, |
491 | sysmmu_iova_t iova, size_t size) | |
2a96536e KC |
492 | { |
493 | unsigned long flags; | |
2a96536e | 494 | |
6b21a5db | 495 | spin_lock_irqsave(&data->lock, flags); |
2a96536e | 496 | if (is_sysmmu_active(data)) { |
3ad6b7f3 | 497 | unsigned int num_inv = 1; |
70605870 CK |
498 | |
499 | if (!IS_ERR(data->clk_master)) | |
500 | clk_enable(data->clk_master); | |
501 | ||
3ad6b7f3 CK |
502 | /* |
503 | * L2TLB invalidation required | |
504 | * 4KB page: 1 invalidation | |
f171abab SK |
505 | * 64KB page: 16 invalidations |
506 | * 1MB page: 64 invalidations | |
3ad6b7f3 CK |
507 | * because the TLB is 8-way set-associative |
508 | * with 64 sets. | |
509 | * A 1MB page can be cached in any of the 64 sets. | |
510 | * A 64KB page can be cached in one of 16 consecutive sets. | |
511 | */ | |
512bd0c6 | 512 | if (MMU_MAJ_VER(data->version) == 2) |
3ad6b7f3 CK |
513 | num_inv = min_t(unsigned int, size / PAGE_SIZE, 64); |
514 | ||
7222e8db CK |
515 | if (sysmmu_block(data->sfrbase)) { |
516 | __sysmmu_tlb_invalidate_entry( | |
3ad6b7f3 | 517 | data->sfrbase, iova, num_inv); |
7222e8db | 518 | sysmmu_unblock(data->sfrbase); |
2a96536e | 519 | } |
70605870 CK |
520 | if (!IS_ERR(data->clk_master)) |
521 | clk_disable(data->clk_master); | |
2a96536e | 522 | } else { |
469acebe MS |
523 | dev_dbg(data->master, |
524 | "disabled. Skipping TLB invalidation @ %#x\n", iova); | |
2a96536e | 525 | } |
9d4e7a24 | 526 | spin_unlock_irqrestore(&data->lock, flags); |
2a96536e KC |
527 | } |
528 | ||
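A standalone sketch (not driver code) of the invalidation-count rule described in the comment inside sysmmu_tlb_invalidate_entry() above, assuming 4KiB (SPAGE_SIZE) pages and the 64-entry cap applied on the SysMMU v2 path:

#include <assert.h>
#include <stddef.h>

/* One REG_MMU_FLUSH_ENTRY write per 4KiB page, capped at 64 writes. */
static unsigned int num_invalidations(size_t size)
{
	unsigned int n = size / 4096;

	return n > 64 ? 64 : n;
}

int main(void)
{
	assert(num_invalidations(4096) == 1);		/* 4KiB small page  */
	assert(num_invalidations(16 * 4096) == 16);	/* 64KiB large page */
	assert(num_invalidations(256 * 4096) == 64);	/* 1MiB section     */
	return 0;
}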
6b21a5db | 529 | static int __init exynos_sysmmu_probe(struct platform_device *pdev) |
2a96536e | 530 | { |
46c16d1e | 531 | int irq, ret; |
7222e8db | 532 | struct device *dev = &pdev->dev; |
2a96536e | 533 | struct sysmmu_drvdata *data; |
7222e8db | 534 | struct resource *res; |
2a96536e | 535 | |
46c16d1e CK |
536 | data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); |
537 | if (!data) | |
538 | return -ENOMEM; | |
2a96536e | 539 | |
7222e8db | 540 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
46c16d1e CK |
541 | data->sfrbase = devm_ioremap_resource(dev, res); |
542 | if (IS_ERR(data->sfrbase)) | |
543 | return PTR_ERR(data->sfrbase); | |
2a96536e | 544 | |
46c16d1e CK |
545 | irq = platform_get_irq(pdev, 0); |
546 | if (irq <= 0) { | |
0bf4e54d | 547 | dev_err(dev, "Unable to find IRQ resource\n"); |
46c16d1e | 548 | return irq; |
2a96536e KC |
549 | } |
550 | ||
46c16d1e | 551 | ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0, |
7222e8db CK |
552 | dev_name(dev), data); |
553 | if (ret) { | |
46c16d1e CK |
554 | dev_err(dev, "Unabled to register handler of irq %d\n", irq); |
555 | return ret; | |
2a96536e KC |
556 | } |
557 | ||
46c16d1e CK |
558 | data->clk = devm_clk_get(dev, "sysmmu"); |
559 | if (IS_ERR(data->clk)) { | |
560 | dev_err(dev, "Failed to get clock!\n"); | |
561 | return PTR_ERR(data->clk); | |
562 | } else { | |
563 | ret = clk_prepare(data->clk); | |
564 | if (ret) { | |
565 | dev_err(dev, "Failed to prepare clk\n"); | |
566 | return ret; | |
567 | } | |
2a96536e KC |
568 | } |
569 | ||
70605870 CK |
570 | data->clk_master = devm_clk_get(dev, "master"); |
571 | if (!IS_ERR(data->clk_master)) { | |
572 | ret = clk_prepare(data->clk_master); | |
573 | if (ret) { | |
574 | clk_unprepare(data->clk); | |
575 | dev_err(dev, "Failed to prepare master's clk\n"); | |
576 | return ret; | |
577 | } | |
578 | } | |
579 | ||
2a96536e | 580 | data->sysmmu = dev; |
9d4e7a24 | 581 | spin_lock_init(&data->lock); |
2a96536e | 582 | |
7222e8db CK |
583 | platform_set_drvdata(pdev, data); |
584 | ||
f4723ec1 | 585 | pm_runtime_enable(dev); |
2a96536e | 586 | |
2a96536e | 587 | return 0; |
2a96536e KC |
588 | } |
589 | ||
6b21a5db CK |
590 | static const struct of_device_id sysmmu_of_match[] __initconst = { |
591 | { .compatible = "samsung,exynos-sysmmu", }, | |
592 | { }, | |
593 | }; | |
594 | ||
595 | static struct platform_driver exynos_sysmmu_driver __refdata = { | |
596 | .probe = exynos_sysmmu_probe, | |
597 | .driver = { | |
2a96536e | 598 | .name = "exynos-sysmmu", |
6b21a5db | 599 | .of_match_table = sysmmu_of_match, |
2a96536e KC |
600 | } |
601 | }; | |
602 | ||
603 | static inline void pgtable_flush(void *vastart, void *vaend) | |
604 | { | |
605 | dmac_flush_range(vastart, vaend); | |
606 | outer_flush_range(virt_to_phys(vastart), | |
607 | virt_to_phys(vaend)); | |
608 | } | |
609 | ||
e1fd1eaa | 610 | static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) |
2a96536e | 611 | { |
bfa00489 | 612 | struct exynos_iommu_domain *domain; |
66a7ed84 | 613 | int i; |
2a96536e | 614 | |
e1fd1eaa JR |
615 | if (type != IOMMU_DOMAIN_UNMANAGED) |
616 | return NULL; | |
617 | ||
bfa00489 MS |
618 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); |
619 | if (!domain) | |
e1fd1eaa | 620 | return NULL; |
2a96536e | 621 | |
bfa00489 MS |
622 | domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2); |
623 | if (!domain->pgtable) | |
2a96536e KC |
624 | goto err_pgtable; |
625 | ||
bfa00489 MS |
626 | domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); |
627 | if (!domain->lv2entcnt) | |
2a96536e KC |
628 | goto err_counter; |
629 | ||
f171abab | 630 | /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */ |
66a7ed84 | 631 | for (i = 0; i < NUM_LV1ENTRIES; i += 8) { |
bfa00489 MS |
632 | domain->pgtable[i + 0] = ZERO_LV2LINK; |
633 | domain->pgtable[i + 1] = ZERO_LV2LINK; | |
634 | domain->pgtable[i + 2] = ZERO_LV2LINK; | |
635 | domain->pgtable[i + 3] = ZERO_LV2LINK; | |
636 | domain->pgtable[i + 4] = ZERO_LV2LINK; | |
637 | domain->pgtable[i + 5] = ZERO_LV2LINK; | |
638 | domain->pgtable[i + 6] = ZERO_LV2LINK; | |
639 | domain->pgtable[i + 7] = ZERO_LV2LINK; | |
66a7ed84 CK |
640 | } |
641 | ||
bfa00489 | 642 | pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES); |
2a96536e | 643 | |
bfa00489 MS |
644 | spin_lock_init(&domain->lock); |
645 | spin_lock_init(&domain->pgtablelock); | |
646 | INIT_LIST_HEAD(&domain->clients); | |
2a96536e | 647 | |
bfa00489 MS |
648 | domain->domain.geometry.aperture_start = 0; |
649 | domain->domain.geometry.aperture_end = ~0UL; | |
650 | domain->domain.geometry.force_aperture = true; | |
3177bb76 | 651 | |
bfa00489 | 652 | return &domain->domain; |
2a96536e KC |
653 | |
654 | err_counter: | |
bfa00489 | 655 | free_pages((unsigned long)domain->pgtable, 2); |
2a96536e | 656 | err_pgtable: |
bfa00489 | 657 | kfree(domain); |
e1fd1eaa | 658 | return NULL; |
2a96536e KC |
659 | } |
660 | ||
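The sizes behind the two __get_free_pages() calls above, as a worked check (standalone sketch, assuming 4KiB pages): the lv1 table is 4096 entries of 4 bytes = 16KiB, hence an order-2 allocation, and the per-section counters are 4096 shorts = 8KiB, hence order 1.

#include <assert.h>

int main(void)
{
	assert(4096 * 4 == 16 * 1024);	/* NUM_LV1ENTRIES * sizeof(sysmmu_pte_t): 4 pages, order 2 */
	assert(4096 * 2 == 8 * 1024);	/* NUM_LV1ENTRIES * sizeof(short): 2 pages, order 1 */
	return 0;
}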
bfa00489 | 661 | static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain) |
2a96536e | 662 | { |
bfa00489 | 663 | struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); |
469acebe | 664 | struct sysmmu_drvdata *data, *next; |
2a96536e KC |
665 | unsigned long flags; |
666 | int i; | |
667 | ||
bfa00489 | 668 | WARN_ON(!list_empty(&domain->clients)); |
2a96536e | 669 | |
bfa00489 | 670 | spin_lock_irqsave(&domain->lock, flags); |
2a96536e | 671 | |
bfa00489 | 672 | list_for_each_entry_safe(data, next, &domain->clients, domain_node) { |
469acebe MS |
673 | if (__sysmmu_disable(data)) |
674 | data->master = NULL; | |
675 | list_del_init(&data->domain_node); | |
2a96536e KC |
676 | } |
677 | ||
bfa00489 | 678 | spin_unlock_irqrestore(&domain->lock, flags); |
2a96536e KC |
679 | |
680 | for (i = 0; i < NUM_LV1ENTRIES; i++) | |
bfa00489 | 681 | if (lv1ent_page(domain->pgtable + i)) |
734c3c73 | 682 | kmem_cache_free(lv2table_kmem_cache, |
bfa00489 | 683 | phys_to_virt(lv2table_base(domain->pgtable + i))); |
2a96536e | 684 | |
bfa00489 MS |
685 | free_pages((unsigned long)domain->pgtable, 2); |
686 | free_pages((unsigned long)domain->lv2entcnt, 1); | |
687 | kfree(domain); | |
2a96536e KC |
688 | } |
689 | ||
bfa00489 | 690 | static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain, |
2a96536e KC |
691 | struct device *dev) |
692 | { | |
6b21a5db | 693 | struct exynos_iommu_owner *owner = dev->archdata.iommu; |
bfa00489 | 694 | struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); |
469acebe | 695 | struct sysmmu_drvdata *data; |
bfa00489 | 696 | phys_addr_t pagetable = virt_to_phys(domain->pgtable); |
2a96536e | 697 | unsigned long flags; |
469acebe | 698 | int ret = -ENODEV; |
2a96536e | 699 | |
469acebe MS |
700 | if (!has_sysmmu(dev)) |
701 | return -ENODEV; | |
2a96536e | 702 | |
469acebe MS |
703 | data = dev_get_drvdata(owner->sysmmu); |
704 | if (data) { | |
bfa00489 | 705 | ret = __sysmmu_enable(data, pagetable, iommu_domain); |
469acebe MS |
706 | if (ret >= 0) { |
707 | data->master = dev; | |
708 | ||
bfa00489 MS |
709 | spin_lock_irqsave(&domain->lock, flags); |
710 | list_add_tail(&data->domain_node, &domain->clients); | |
711 | spin_unlock_irqrestore(&domain->lock, flags); | |
469acebe MS |
712 | } |
713 | } | |
2a96536e KC |
714 | |
715 | if (ret < 0) { | |
7222e8db CK |
716 | dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n", |
717 | __func__, &pagetable); | |
7222e8db | 718 | return ret; |
2a96536e KC |
719 | } |
720 | ||
7222e8db CK |
721 | dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n", |
722 | __func__, &pagetable, (ret == 0) ? "" : ", again"); | |
723 | ||
2a96536e KC |
724 | return ret; |
725 | } | |
726 | ||
bfa00489 | 727 | static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain, |
2a96536e KC |
728 | struct device *dev) |
729 | { | |
bfa00489 MS |
730 | struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); |
731 | phys_addr_t pagetable = virt_to_phys(domain->pgtable); | |
469acebe | 732 | struct sysmmu_drvdata *data; |
2a96536e | 733 | unsigned long flags; |
469acebe | 734 | bool found = false; |
2a96536e | 735 | |
469acebe MS |
736 | if (!has_sysmmu(dev)) |
737 | return; | |
2a96536e | 738 | |
bfa00489 MS |
739 | spin_lock_irqsave(&domain->lock, flags); |
740 | list_for_each_entry(data, &domain->clients, domain_node) { | |
469acebe MS |
741 | if (data->master == dev) { |
742 | if (__sysmmu_disable(data)) { | |
743 | data->master = NULL; | |
744 | list_del_init(&data->domain_node); | |
745 | } | |
746 | found = true; | |
2a96536e KC |
747 | break; |
748 | } | |
749 | } | |
bfa00489 | 750 | spin_unlock_irqrestore(&domain->lock, flags); |
2a96536e | 751 | |
469acebe | 752 | if (found) |
7222e8db CK |
753 | dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", |
754 | __func__, &pagetable); | |
6b21a5db CK |
755 | else |
756 | dev_err(dev, "%s: No IOMMU is attached\n", __func__); | |
2a96536e KC |
757 | } |
758 | ||
bfa00489 | 759 | static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, |
66a7ed84 | 760 | sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter) |
2a96536e | 761 | { |
61128f08 | 762 | if (lv1ent_section(sent)) { |
d09d78fc | 763 | WARN(1, "Trying to map on %#08x already mapped with a 1MiB page", iova); |
61128f08 CK |
764 | return ERR_PTR(-EADDRINUSE); |
765 | } | |
766 | ||
2a96536e | 767 | if (lv1ent_fault(sent)) { |
d09d78fc | 768 | sysmmu_pte_t *pent; |
66a7ed84 | 769 | bool need_flush_flpd_cache = lv1ent_zero(sent); |
2a96536e | 770 | |
734c3c73 | 771 | pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC); |
d09d78fc | 772 | BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1)); |
2a96536e | 773 | if (!pent) |
61128f08 | 774 | return ERR_PTR(-ENOMEM); |
2a96536e | 775 | |
7222e8db | 776 | *sent = mk_lv1ent_page(virt_to_phys(pent)); |
dc3814f4 | 777 | kmemleak_ignore(pent); |
2a96536e KC |
778 | *pgcounter = NUM_LV2ENTRIES; |
779 | pgtable_flush(pent, pent + NUM_LV2ENTRIES); | |
780 | pgtable_flush(sent, sent + 1); | |
66a7ed84 CK |
781 | |
782 | /* | |
f171abab SK |
783 | * If the pre-fetched SLPD is the faulty SLPD in zero_l2_table, |
784 | * the FLPD cache may hold the address of zero_l2_table. This | |
785 | * function replaces the zero_l2_table with a new L2 page table | |
786 | * so that valid mappings can be written. | |
66a7ed84 | 787 | * Accessing the valid area may cause page fault since FLPD |
f171abab SK |
788 | * cache may still hold zero_l2_table for the valid area |
789 | * instead of the new L2 page table that contains the mapping | |
790 | * information for the valid area. | |
66a7ed84 CK |
791 | * Thus any replacement of zero_l2_table with other valid L2 |
792 | * page table must involve FLPD cache invalidation for System | |
793 | * MMU v3.3. | |
794 | * FLPD cache invalidation is performed with TLB invalidation | |
795 | * by VPN without blocking. It is safe to invalidate TLB without | |
796 | * blocking because the target address of TLB invalidation is | |
797 | * not currently mapped. | |
798 | */ | |
799 | if (need_flush_flpd_cache) { | |
469acebe | 800 | struct sysmmu_drvdata *data; |
365409db | 801 | |
bfa00489 MS |
802 | spin_lock(&domain->lock); |
803 | list_for_each_entry(data, &domain->clients, domain_node) | |
469acebe | 804 | sysmmu_tlb_invalidate_flpdcache(data, iova); |
bfa00489 | 805 | spin_unlock(&domain->lock); |
66a7ed84 | 806 | } |
2a96536e KC |
807 | } |
808 | ||
809 | return page_entry(sent, iova); | |
810 | } | |
811 | ||
bfa00489 | 812 | static int lv1set_section(struct exynos_iommu_domain *domain, |
66a7ed84 | 813 | sysmmu_pte_t *sent, sysmmu_iova_t iova, |
61128f08 | 814 | phys_addr_t paddr, short *pgcnt) |
2a96536e | 815 | { |
61128f08 | 816 | if (lv1ent_section(sent)) { |
d09d78fc | 817 | WARN(1, "Trying to map 1MiB@%#08x that is already mapped", |
61128f08 | 818 | iova); |
2a96536e | 819 | return -EADDRINUSE; |
61128f08 | 820 | } |
2a96536e KC |
821 | |
822 | if (lv1ent_page(sent)) { | |
61128f08 | 823 | if (*pgcnt != NUM_LV2ENTRIES) { |
d09d78fc | 824 | WARN(1, "Trying to map 1MiB@%#08x that is already mapped", |
61128f08 | 825 | iova); |
2a96536e | 826 | return -EADDRINUSE; |
61128f08 | 827 | } |
2a96536e | 828 | |
734c3c73 | 829 | kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0)); |
2a96536e KC |
830 | *pgcnt = 0; |
831 | } | |
832 | ||
833 | *sent = mk_lv1ent_sect(paddr); | |
834 | ||
835 | pgtable_flush(sent, sent + 1); | |
836 | ||
bfa00489 | 837 | spin_lock(&domain->lock); |
66a7ed84 | 838 | if (lv1ent_page_zero(sent)) { |
469acebe | 839 | struct sysmmu_drvdata *data; |
66a7ed84 CK |
840 | /* |
841 | * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD | |
842 | * entry by speculative prefetch of SLPD which has no mapping. | |
843 | */ | |
bfa00489 | 844 | list_for_each_entry(data, &domain->clients, domain_node) |
469acebe | 845 | sysmmu_tlb_invalidate_flpdcache(data, iova); |
66a7ed84 | 846 | } |
bfa00489 | 847 | spin_unlock(&domain->lock); |
66a7ed84 | 848 | |
2a96536e KC |
849 | return 0; |
850 | } | |
851 | ||
d09d78fc | 852 | static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, |
2a96536e KC |
853 | short *pgcnt) |
854 | { | |
855 | if (size == SPAGE_SIZE) { | |
0bf4e54d | 856 | if (WARN_ON(!lv2ent_fault(pent))) |
2a96536e KC |
857 | return -EADDRINUSE; |
858 | ||
859 | *pent = mk_lv2ent_spage(paddr); | |
860 | pgtable_flush(pent, pent + 1); | |
861 | *pgcnt -= 1; | |
862 | } else { /* size == LPAGE_SIZE */ | |
863 | int i; | |
365409db | 864 | |
2a96536e | 865 | for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) { |
0bf4e54d | 866 | if (WARN_ON(!lv2ent_fault(pent))) { |
61128f08 CK |
867 | if (i > 0) |
868 | memset(pent - i, 0, sizeof(*pent) * i); | |
2a96536e KC |
869 | return -EADDRINUSE; |
870 | } | |
871 | ||
872 | *pent = mk_lv2ent_lpage(paddr); | |
873 | } | |
874 | pgtable_flush(pent - SPAGES_PER_LPAGE, pent); | |
875 | *pgcnt -= SPAGES_PER_LPAGE; | |
876 | } | |
877 | ||
878 | return 0; | |
879 | } | |
880 | ||
66a7ed84 CK |
881 | /* |
882 | * *CAUTION* to the I/O virtual memory managers that support exynos-iommu: | |
883 | * | |
f171abab | 884 | * System MMU v3.x has advanced logic to improve address translation |
66a7ed84 | 885 | * performance with caching more page table entries by a page table walk. |
f171abab SK |
886 | * However, the logic has a bug: faulty page table entries can be cached, and |
887 | * System MMU then reports a page fault when the cached faulty entry is hit, | |
888 | * even though the entry has since been updated to a valid one. | |
889 | * To prevent caching of faulty page table entries which may later be updated | |
890 | * to valid entries, the virtual memory manager should apply the workaround | |
891 | * described below. | |
66a7ed84 CK |
892 | * |
893 | * Any two consecutive I/O virtual address regions must have a hole of 128KiB | |
f171abab | 894 | * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug). |
66a7ed84 | 895 | * |
f171abab | 896 | * Precisely, any start address of I/O virtual region must be aligned with |
66a7ed84 CK |
897 | * the following sizes for System MMU v3.1 and v3.2. |
898 | * System MMU v3.1: 128KiB | |
899 | * System MMU v3.2: 256KiB | |
900 | * | |
901 | * Because System MMU v3.3 caches page table entries more aggressively, it needs | |
f171abab SK |
902 | * more workarounds. |
903 | * - Any two consecutive I/O virtual regions must have a hole of size larger | |
904 | * than or equal to 128KiB. | |
66a7ed84 CK |
905 | * - Start address of an I/O virtual region must be aligned by 128KiB. |
906 | */ | |
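A hypothetical helper sketching how an I/O virtual address allocator could honour the v3.3 rules above; the function name and constant are assumptions for illustration, not part of this driver:

#include <stdint.h>

#define IOVA_REGION_ALIGN	(128u * 1024)	/* assumed constant: 128KiB, per the comment above */

/* Pick the start of the next I/O virtual region so that it is 128KiB-aligned
 * and leaves a hole of at least 128KiB after the previous region.
 */
static uint32_t next_region_start(uint32_t prev_region_end)
{
	uint32_t start = prev_region_end + IOVA_REGION_ALIGN;

	/* round up to the next 128KiB boundary */
	return (start + IOVA_REGION_ALIGN - 1) & ~(IOVA_REGION_ALIGN - 1);
}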
bfa00489 MS |
907 | static int exynos_iommu_map(struct iommu_domain *iommu_domain, |
908 | unsigned long l_iova, phys_addr_t paddr, size_t size, | |
909 | int prot) | |
2a96536e | 910 | { |
bfa00489 | 911 | struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); |
d09d78fc CK |
912 | sysmmu_pte_t *entry; |
913 | sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; | |
2a96536e KC |
914 | unsigned long flags; |
915 | int ret = -ENOMEM; | |
916 | ||
bfa00489 | 917 | BUG_ON(domain->pgtable == NULL); |
2a96536e | 918 | |
bfa00489 | 919 | spin_lock_irqsave(&domain->pgtablelock, flags); |
2a96536e | 920 | |
bfa00489 | 921 | entry = section_entry(domain->pgtable, iova); |
2a96536e KC |
922 | |
923 | if (size == SECT_SIZE) { | |
bfa00489 MS |
924 | ret = lv1set_section(domain, entry, iova, paddr, |
925 | &domain->lv2entcnt[lv1ent_offset(iova)]); | |
2a96536e | 926 | } else { |
d09d78fc | 927 | sysmmu_pte_t *pent; |
2a96536e | 928 | |
bfa00489 MS |
929 | pent = alloc_lv2entry(domain, entry, iova, |
930 | &domain->lv2entcnt[lv1ent_offset(iova)]); | |
2a96536e | 931 | |
61128f08 CK |
932 | if (IS_ERR(pent)) |
933 | ret = PTR_ERR(pent); | |
2a96536e KC |
934 | else |
935 | ret = lv2set_page(pent, paddr, size, | |
bfa00489 | 936 | &domain->lv2entcnt[lv1ent_offset(iova)]); |
2a96536e KC |
937 | } |
938 | ||
61128f08 | 939 | if (ret) |
0bf4e54d CK |
940 | pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n", |
941 | __func__, ret, size, iova); | |
2a96536e | 942 | |
bfa00489 | 943 | spin_unlock_irqrestore(&domain->pgtablelock, flags); |
2a96536e KC |
944 | |
945 | return ret; | |
946 | } | |
947 | ||
bfa00489 MS |
948 | static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain, |
949 | sysmmu_iova_t iova, size_t size) | |
66a7ed84 | 950 | { |
469acebe | 951 | struct sysmmu_drvdata *data; |
66a7ed84 CK |
952 | unsigned long flags; |
953 | ||
bfa00489 | 954 | spin_lock_irqsave(&domain->lock, flags); |
66a7ed84 | 955 | |
bfa00489 | 956 | list_for_each_entry(data, &domain->clients, domain_node) |
469acebe | 957 | sysmmu_tlb_invalidate_entry(data, iova, size); |
66a7ed84 | 958 | |
bfa00489 | 959 | spin_unlock_irqrestore(&domain->lock, flags); |
66a7ed84 CK |
960 | } |
961 | ||
bfa00489 MS |
962 | static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain, |
963 | unsigned long l_iova, size_t size) | |
2a96536e | 964 | { |
bfa00489 | 965 | struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); |
d09d78fc CK |
966 | sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; |
967 | sysmmu_pte_t *ent; | |
61128f08 | 968 | size_t err_pgsize; |
d09d78fc | 969 | unsigned long flags; |
2a96536e | 970 | |
bfa00489 | 971 | BUG_ON(domain->pgtable == NULL); |
2a96536e | 972 | |
bfa00489 | 973 | spin_lock_irqsave(&domain->pgtablelock, flags); |
2a96536e | 974 | |
bfa00489 | 975 | ent = section_entry(domain->pgtable, iova); |
2a96536e KC |
976 | |
977 | if (lv1ent_section(ent)) { | |
0bf4e54d | 978 | if (WARN_ON(size < SECT_SIZE)) { |
61128f08 CK |
979 | err_pgsize = SECT_SIZE; |
980 | goto err; | |
981 | } | |
2a96536e | 982 | |
f171abab SK |
983 | /* workaround for h/w bug in System MMU v3.3 */ |
984 | *ent = ZERO_LV2LINK; | |
2a96536e KC |
985 | pgtable_flush(ent, ent + 1); |
986 | size = SECT_SIZE; | |
987 | goto done; | |
988 | } | |
989 | ||
990 | if (unlikely(lv1ent_fault(ent))) { | |
991 | if (size > SECT_SIZE) | |
992 | size = SECT_SIZE; | |
993 | goto done; | |
994 | } | |
995 | ||
996 | /* lv1ent_page(ent) == true here */ | |
997 | ||
998 | ent = page_entry(ent, iova); | |
999 | ||
1000 | if (unlikely(lv2ent_fault(ent))) { | |
1001 | size = SPAGE_SIZE; | |
1002 | goto done; | |
1003 | } | |
1004 | ||
1005 | if (lv2ent_small(ent)) { | |
1006 | *ent = 0; | |
1007 | size = SPAGE_SIZE; | |
6cb47ed7 | 1008 | pgtable_flush(ent, ent + 1); |
bfa00489 | 1009 | domain->lv2entcnt[lv1ent_offset(iova)] += 1; |
2a96536e KC |
1010 | goto done; |
1011 | } | |
1012 | ||
1013 | /* lv2ent_large(ent) == true here */ | |
0bf4e54d | 1014 | if (WARN_ON(size < LPAGE_SIZE)) { |
61128f08 CK |
1015 | err_pgsize = LPAGE_SIZE; |
1016 | goto err; | |
1017 | } | |
2a96536e KC |
1018 | |
1019 | memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE); | |
6cb47ed7 | 1020 | pgtable_flush(ent, ent + SPAGES_PER_LPAGE); |
2a96536e KC |
1021 | |
1022 | size = LPAGE_SIZE; | |
bfa00489 | 1023 | domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; |
2a96536e | 1024 | done: |
bfa00489 | 1025 | spin_unlock_irqrestore(&domain->pgtablelock, flags); |
2a96536e | 1026 | |
bfa00489 | 1027 | exynos_iommu_tlb_invalidate_entry(domain, iova, size); |
2a96536e | 1028 | |
2a96536e | 1029 | return size; |
61128f08 | 1030 | err: |
bfa00489 | 1031 | spin_unlock_irqrestore(&domain->pgtablelock, flags); |
61128f08 | 1032 | |
0bf4e54d CK |
1033 | pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n", |
1034 | __func__, size, iova, err_pgsize); | |
61128f08 CK |
1035 | |
1036 | return 0; | |
2a96536e KC |
1037 | } |
1038 | ||
bfa00489 | 1039 | static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain, |
bb5547ac | 1040 | dma_addr_t iova) |
2a96536e | 1041 | { |
bfa00489 | 1042 | struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); |
d09d78fc | 1043 | sysmmu_pte_t *entry; |
2a96536e KC |
1044 | unsigned long flags; |
1045 | phys_addr_t phys = 0; | |
1046 | ||
bfa00489 | 1047 | spin_lock_irqsave(&domain->pgtablelock, flags); |
2a96536e | 1048 | |
bfa00489 | 1049 | entry = section_entry(domain->pgtable, iova); |
2a96536e KC |
1050 | |
1051 | if (lv1ent_section(entry)) { | |
1052 | phys = section_phys(entry) + section_offs(iova); | |
1053 | } else if (lv1ent_page(entry)) { | |
1054 | entry = page_entry(entry, iova); | |
1055 | ||
1056 | if (lv2ent_large(entry)) | |
1057 | phys = lpage_phys(entry) + lpage_offs(iova); | |
1058 | else if (lv2ent_small(entry)) | |
1059 | phys = spage_phys(entry) + spage_offs(iova); | |
1060 | } | |
1061 | ||
bfa00489 | 1062 | spin_unlock_irqrestore(&domain->pgtablelock, flags); |
2a96536e KC |
1063 | |
1064 | return phys; | |
1065 | } | |
1066 | ||
bf4a1c92 AM |
1067 | static int exynos_iommu_add_device(struct device *dev) |
1068 | { | |
1069 | struct iommu_group *group; | |
1070 | int ret; | |
1071 | ||
1072 | group = iommu_group_get(dev); | |
1073 | ||
1074 | if (!group) { | |
1075 | group = iommu_group_alloc(); | |
1076 | if (IS_ERR(group)) { | |
1077 | dev_err(dev, "Failed to allocate IOMMU group\n"); | |
1078 | return PTR_ERR(group); | |
1079 | } | |
1080 | } | |
1081 | ||
1082 | ret = iommu_group_add_device(group, dev); | |
1083 | iommu_group_put(group); | |
1084 | ||
1085 | return ret; | |
1086 | } | |
1087 | ||
1088 | static void exynos_iommu_remove_device(struct device *dev) | |
1089 | { | |
1090 | iommu_group_remove_device(dev); | |
1091 | } | |
1092 | ||
b22f6434 | 1093 | static const struct iommu_ops exynos_iommu_ops = { |
e1fd1eaa JR |
1094 | .domain_alloc = exynos_iommu_domain_alloc, |
1095 | .domain_free = exynos_iommu_domain_free, | |
ba5fa6f6 BH |
1096 | .attach_dev = exynos_iommu_attach_device, |
1097 | .detach_dev = exynos_iommu_detach_device, | |
1098 | .map = exynos_iommu_map, | |
1099 | .unmap = exynos_iommu_unmap, | |
315786eb | 1100 | .map_sg = default_iommu_map_sg, |
ba5fa6f6 BH |
1101 | .iova_to_phys = exynos_iommu_iova_to_phys, |
1102 | .add_device = exynos_iommu_add_device, | |
1103 | .remove_device = exynos_iommu_remove_device, | |
2a96536e KC |
1104 | .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, |
1105 | }; | |
1106 | ||
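For context, a rough usage sketch (kernel code, error handling trimmed) of how a client would reach these ops through the generic IOMMU API once bus_set_iommu() below has registered them on the platform bus; the device `dev` is assumed to be a master with a System MMU:

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int exynos_iommu_usage_sketch(struct device *dev, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);  /* -> exynos_iommu_domain_alloc() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);           /* -> exynos_iommu_attach_device() */
	if (ret)
		goto out_free;

	/* size must be one of pgsize_bitmap: 4KiB, 64KiB or 1MiB */
	ret = iommu_map(domain, SZ_1G, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		iommu_unmap(domain, SZ_1G, SZ_4K);        /* -> exynos_iommu_unmap() */

	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}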
1107 | static int __init exynos_iommu_init(void) | |
1108 | { | |
a7b67cd5 | 1109 | struct device_node *np; |
2a96536e KC |
1110 | int ret; |
1111 | ||
a7b67cd5 TR |
1112 | np = of_find_matching_node(NULL, sysmmu_of_match); |
1113 | if (!np) | |
1114 | return 0; | |
1115 | ||
1116 | of_node_put(np); | |
1117 | ||
734c3c73 CK |
1118 | lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", |
1119 | LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); | |
1120 | if (!lv2table_kmem_cache) { | |
1121 | pr_err("%s: Failed to create kmem cache\n", __func__); | |
1122 | return -ENOMEM; | |
1123 | } | |
1124 | ||
2a96536e | 1125 | ret = platform_driver_register(&exynos_sysmmu_driver); |
734c3c73 CK |
1126 | if (ret) { |
1127 | pr_err("%s: Failed to register driver\n", __func__); | |
1128 | goto err_reg_driver; | |
1129 | } | |
2a96536e | 1130 | |
66a7ed84 CK |
1131 | zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL); |
1132 | if (zero_lv2_table == NULL) { | |
1133 | pr_err("%s: Failed to allocate zero level2 page table\n", | |
1134 | __func__); | |
1135 | ret = -ENOMEM; | |
1136 | goto err_zero_lv2; | |
1137 | } | |
1138 | ||
734c3c73 CK |
1139 | ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops); |
1140 | if (ret) { | |
1141 | pr_err("%s: Failed to register exynos-iommu driver.\n", | |
1142 | __func__); | |
1143 | goto err_set_iommu; | |
1144 | } | |
2a96536e | 1145 | |
734c3c73 CK |
1146 | return 0; |
1147 | err_set_iommu: | |
66a7ed84 CK |
1148 | kmem_cache_free(lv2table_kmem_cache, zero_lv2_table); |
1149 | err_zero_lv2: | |
734c3c73 CK |
1150 | platform_driver_unregister(&exynos_sysmmu_driver); |
1151 | err_reg_driver: | |
1152 | kmem_cache_destroy(lv2table_kmem_cache); | |
2a96536e KC |
1153 | return ret; |
1154 | } | |
1155 | subsys_initcall(exynos_iommu_init); |