/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
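
/*
 * Worked example (added for clarity; values are illustrative): with the
 * orders above, iova = 0x12345678 decomposes as
 *   1MiB section:     index  = iova >> SECT_ORDER     = 0x123
 *                     offset = iova & (SECT_SIZE - 1) = 0x45678
 *   64KiB large page: offset = iova & (LPAGE_SIZE - 1) = 0x5678
 *   4KiB small page:  offset = iova & (SPAGE_SIZE - 1) = 0x678
 */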

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
                            ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
                           ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
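
/*
 * Summary of the type encoding tested above (added note): the two low
 * bits of an entry select its type.
 *   Lv1: 0 or 3 = fault, 1 = link to an lv2 table, 2 = 1MiB section
 *   Lv2: 0 = fault, 1 = 64KiB large page, 2 or 3 = 4KiB small page
 * A link that still points at zero_lv2_table (ZERO_LV2LINK) also counts
 * as fault at lv1.
 */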

/*
 * v1.x - v3.x SYSMMU supports 32-bit physical and 32-bit virtual address
 * spaces. v5.0 introduced support for a 36-bit physical address space by
 * shifting all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized to the proper value (0 or 4)
 * on the first SYSMMU probe.
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
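
/*
 * Worked example (added; assumes a v5 controller, PG_ENT_SHIFT == 4):
 * the 36-bit physical address 0x8_1234_5000 is stored in a page table
 * entry as 0x812345000 >> 4 == 0x81234500, which still fits in a 32-bit
 * sysmmu_pte_t; sect_to_phys() below undoes the shift.
 */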

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
        return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
        return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
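
/*
 * Index example (added for clarity): a 32-bit IOVA selects
 * lv1ent_offset(iova) = iova[31:20], one of 4096 first-level entries
 * covering 1MiB each, and lv2ent_offset(iova) = iova[19:12], one of
 * NUM_LV2ENTRIES == 256 second-level entries covering 4KiB each.
 */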

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa) ((pa >> PG_ENT_SHIFT) | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_spage(pa) ((pa >> PG_ENT_SHIFT) | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
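
/*
 * Worked example (added): REG_MMU_VERSION keeps the raw version in its
 * top 11 bits, so MMU_RAW_VER() reads (reg >> 21) & 0x7FF. In the packed
 * format the major version sits in bits [10:7] and the minor in [6:0]:
 * MAKE_MMU_VER(3, 3) == 0x183, MMU_MAJ_VER(0x183) == 3 and
 * MMU_MIN_VER(0x183) == 3.
 */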

/* v1.x - v3.x registers */
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN 0x00C
#define REG_V5_MMU_FLUSH_ALL 0x010
#define REG_V5_MMU_FLUSH_ENTRY 0x014
#define REG_V5_INT_STATUS 0x060
#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
        return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
        return (sysmmu_pte_t *)phys_to_virt(
                                lv2table_base(sent)) + lv2ent_offset(iova);
}
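
/*
 * Walk example (added): resolving an IOVA in software mirrors the
 * hardware page table walk, roughly:
 *
 *     sysmmu_pte_t *sent = section_entry(pgtable, iova);
 *     if (lv1ent_page(sent))
 *             pent = page_entry(sent, iova);  /+ 4KiB/64KiB leaf entry +/
 *
 * exynos_iommu_iova_to_phys() below implements exactly this lookup.
 */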

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
        unsigned int bit;        /* bit number in STATUS register */
        unsigned short addr_reg; /* register to read VA fault address */
        const char *name;        /* human readable fault name */
        unsigned int type;       /* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
        { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
        { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
        { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
        { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
        { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
        { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
        { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
        { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
        { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
        { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
        { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
        { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
        { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
        { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
        { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
        { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
        { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
        { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev->archdata.iommu of the master device
 * on device add. It contains a list of SYSMMU controllers defined by
 * device tree, which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
        struct list_head controllers;   /* list of sysmmu_drvdata.owner_node */
        struct iommu_domain *domain;    /* domain this device is attached to */
};

/*
 * This structure is an exynos specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices that
 * have been attached to this domain, and the page tables of the I/O address
 * space it defines. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
        struct list_head clients;   /* list of sysmmu_drvdata.domain_node */
        sysmmu_pte_t *pgtable;      /* lv1 page table, 16KB */
        short *lv2entcnt;           /* free lv2 entry counter for each section */
        spinlock_t lock;            /* lock for modifying list of clients */
        spinlock_t pgtablelock;     /* lock for modifying page table @ pgtable */
        struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from device
 * tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
        struct device *sysmmu;        /* SYSMMU controller device */
        struct device *master;        /* master device (owner) */
        void __iomem *sfrbase;        /* our registers */
        struct clk *clk;              /* SYSMMU's clock */
        struct clk *aclk;             /* SYSMMU's aclk clock */
        struct clk *pclk;             /* SYSMMU's pclk clock */
        struct clk *clk_master;       /* master's device clock */
        int activations;              /* number of calls to sysmmu_enable */
        spinlock_t lock;              /* lock for modifying state */
        struct exynos_iommu_domain *domain; /* domain we belong to */
        struct list_head domain_node; /* node for domain clients list */
        struct list_head owner_node;  /* node for owner controllers list */
        phys_addr_t pgtable;          /* assigned page table structure */
        unsigned int version;         /* our version */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU was not active previously
           and it needs to be initialized */
        return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU needs to be disabled */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
        return data->activations > 0;
}
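
/*
 * Refcounting example (added): enabling the same controller twice for
 * one page table takes activations 0 -> 1 -> 2; only the first call
 * programs the hardware. The disable path mirrors this and only turns
 * the hardware off when activations drops back to 0.
 */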

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
        writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
        int i = 120;

        writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
        while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
                --i;

        if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
                sysmmu_unblock(data);
                return false;
        }

        return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
        if (MMU_MAJ_VER(data->version) < 5)
                writel(0x1, data->sfrbase + REG_MMU_FLUSH);
        else
                writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
                                sysmmu_iova_t iova, unsigned int num_inv)
{
        unsigned int i;

        for (i = 0; i < num_inv; i++) {
                if (MMU_MAJ_VER(data->version) < 5)
                        writel((iova & SPAGE_MASK) | 1,
                               data->sfrbase + REG_MMU_FLUSH_ENTRY);
                else
                        writel((iova & SPAGE_MASK) | 1,
                               data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
                iova += SPAGE_SIZE;
        }
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
        if (MMU_MAJ_VER(data->version) < 5)
                writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
        else
                writel(pgd >> PAGE_SHIFT,
                       data->sfrbase + REG_V5_PT_BASE_PFN);

        __sysmmu_tlb_invalidate(data);
}
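
/*
 * Note (added): entry invalidation always works at 4KiB granularity;
 * writing (iova & SPAGE_MASK) | 1 flushes the TLB entry covering one
 * 4KiB page, so callers flush larger mappings by looping with
 * num_inv > 1, stepping iova by SPAGE_SIZE per write.
 */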

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
        u32 ver;

        clk_enable(data->clk_master);
        clk_enable(data->clk);
        clk_enable(data->pclk);
        clk_enable(data->aclk);

        ver = readl(data->sfrbase + REG_MMU_VERSION);

        /* controllers on some SoCs don't report proper version */
        if (ver == 0x80000001u)
                data->version = MAKE_MMU_VER(1, 0);
        else
                data->version = MMU_RAW_VER(ver);

        dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
                MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

        clk_disable(data->aclk);
        clk_disable(data->pclk);
        clk_disable(data->clk);
        clk_disable(data->clk_master);
}

static void show_fault_information(struct sysmmu_drvdata *data,
                                   const struct sysmmu_fault_info *finfo,
                                   sysmmu_iova_t fault_addr)
{
        sysmmu_pte_t *ent;

        dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
                finfo->name, fault_addr, &data->pgtable);
        ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
        dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
                dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
        }
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
        /* SYSMMU is in blocked state when an interrupt has occurred. */
        struct sysmmu_drvdata *data = dev_id;
        const struct sysmmu_fault_info *finfo;
        unsigned int i, n, itype;
        sysmmu_iova_t fault_addr = -1;
        unsigned short reg_status, reg_clear;
        int ret = -ENOSYS;

        WARN_ON(!is_sysmmu_active(data));

        if (MMU_MAJ_VER(data->version) < 5) {
                reg_status = REG_INT_STATUS;
                reg_clear = REG_INT_CLEAR;
                finfo = sysmmu_faults;
                n = ARRAY_SIZE(sysmmu_faults);
        } else {
                reg_status = REG_V5_INT_STATUS;
                reg_clear = REG_V5_INT_CLEAR;
                finfo = sysmmu_v5_faults;
                n = ARRAY_SIZE(sysmmu_v5_faults);
        }

        spin_lock(&data->lock);

        clk_enable(data->clk_master);

        itype = __ffs(readl(data->sfrbase + reg_status));
        for (i = 0; i < n; i++, finfo++)
                if (finfo->bit == itype)
                        break;
        /* unknown/unsupported fault */
        BUG_ON(i == n);

        /* print debug message */
        fault_addr = readl(data->sfrbase + finfo->addr_reg);
        show_fault_information(data, finfo, fault_addr);

        if (data->domain)
                ret = report_iommu_fault(&data->domain->domain,
                                         data->master, fault_addr, finfo->type);
        /* fault is not recovered by fault handler */
        BUG_ON(ret != 0);

        writel(1 << itype, data->sfrbase + reg_clear);

        sysmmu_unblock(data);

        clk_disable(data->clk_master);

        spin_unlock(&data->lock);

        return IRQ_HANDLED;
}
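
/*
 * Lookup example (added): if a v5 controller raises an "AW PAGE" fault,
 * bit 17 is set in REG_V5_INT_STATUS, __ffs() returns 17, and the scan
 * over sysmmu_v5_faults[] finds the entry that names the fault and
 * points at REG_V5_FAULT_AW_VA for the faulting virtual address.
 */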

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
        clk_enable(data->clk_master);

        writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
        writel(0, data->sfrbase + REG_MMU_CFG);

        clk_disable(data->aclk);
        clk_disable(data->pclk);
        clk_disable(data->clk);
        clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
        bool disabled;
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);

        disabled = set_sysmmu_inactive(data);

        if (disabled) {
                data->pgtable = 0;
                data->domain = NULL;

                __sysmmu_disable_nocount(data);

                dev_dbg(data->sysmmu, "Disabled\n");
        } else {
                dev_dbg(data->sysmmu, "%d times left to disable\n",
                        data->activations);
        }

        spin_unlock_irqrestore(&data->lock, flags);

        return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
        unsigned int cfg;

        if (data->version <= MAKE_MMU_VER(3, 1))
                cfg = CFG_LRU | CFG_QOS(15);
        else if (data->version <= MAKE_MMU_VER(3, 2))
                cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
        else
                cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

        writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
        clk_enable(data->clk_master);
        clk_enable(data->clk);
        clk_enable(data->pclk);
        clk_enable(data->aclk);

        writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

        __sysmmu_init_config(data);

        __sysmmu_set_ptbase(data, data->pgtable);

        writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

        clk_disable(data->clk_master);
}
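
/*
 * Note (added): the enable sequence deliberately blocks the MMU first,
 * then programs CFG and the page table base (which also flushes the
 * TLB), and only then writes CTRL_ENABLE, so translation never starts
 * with a stale TLB or an unprogrammed page table pointer.
 */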

static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
                           struct exynos_iommu_domain *domain)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        if (set_sysmmu_active(data)) {
                data->pgtable = pgtable;
                data->domain = domain;

                __sysmmu_enable_nocount(data);

                dev_dbg(data->sysmmu, "Enabled\n");
        } else {
                ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

                dev_dbg(data->sysmmu, "already enabled\n");
        }

        if (WARN_ON(ret < 0))
                set_sysmmu_inactive(data); /* decrement count */

        spin_unlock_irqrestore(&data->lock, flags);

        return ret;
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
                                            sysmmu_iova_t iova)
{
        unsigned long flags;

        clk_enable(data->clk_master);

        spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data)) {
                if (data->version >= MAKE_MMU_VER(3, 3))
                        __sysmmu_tlb_invalidate_entry(data, iova, 1);
        }
        spin_unlock_irqrestore(&data->lock, flags);

        clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
                                        sysmmu_iova_t iova, size_t size)
{
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data)) {
                unsigned int num_inv = 1;

                clk_enable(data->clk_master);

                /*
                 * L2TLB invalidation required
                 * 4KB page: 1 invalidation
                 * 64KB page: 16 invalidations
                 * 1MB page: 64 invalidations
                 * because the TLB is 8-way set-associative with 64 sets.
                 * A 1MB page can be cached in any of the sets, while a
                 * 64KB page can sit in one of 16 consecutive sets.
                 */
                if (MMU_MAJ_VER(data->version) == 2)
                        num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

                if (sysmmu_block(data)) {
                        __sysmmu_tlb_invalidate_entry(data, iova, num_inv);
                        sysmmu_unblock(data);
                }
                clk_disable(data->clk_master);
        } else {
                dev_dbg(data->master,
                        "disabled. Skipping TLB invalidation @ %#x\n", iova);
        }
        spin_unlock_irqrestore(&data->lock, flags);
}
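
/*
 * Worked example (added; assumes 4KiB PAGE_SIZE on the v2 path above):
 * invalidating a 64KiB large page gives
 * num_inv = min(65536 / 4096, 64) = 16 entry invalidations, while a
 * 1MiB section caps at the full 64 sets.
 */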

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
        int irq, ret;
        struct device *dev = &pdev->dev;
        struct sysmmu_drvdata *data;
        struct resource *res;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->sfrbase = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->sfrbase))
                return PTR_ERR(data->sfrbase);

        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                dev_err(dev, "Unable to find IRQ resource\n");
                return irq;
        }

        ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
                               dev_name(dev), data);
        if (ret) {
                dev_err(dev, "Unable to register handler of irq %d\n", irq);
                return ret;
        }

        data->clk = devm_clk_get(dev, "sysmmu");
        if (!IS_ERR(data->clk)) {
                ret = clk_prepare(data->clk);
                if (ret) {
                        dev_err(dev, "Failed to prepare clk\n");
                        return ret;
                }
        } else {
                data->clk = NULL;
        }

        data->aclk = devm_clk_get(dev, "aclk");
        if (!IS_ERR(data->aclk)) {
                ret = clk_prepare(data->aclk);
                if (ret) {
                        dev_err(dev, "Failed to prepare aclk\n");
                        return ret;
                }
        } else {
                data->aclk = NULL;
        }

        data->pclk = devm_clk_get(dev, "pclk");
        if (!IS_ERR(data->pclk)) {
                ret = clk_prepare(data->pclk);
                if (ret) {
                        dev_err(dev, "Failed to prepare pclk\n");
                        return ret;
                }
        } else {
                data->pclk = NULL;
        }

        if (!data->clk && (!data->aclk || !data->pclk)) {
                dev_err(dev, "Failed to get device clock(s)!\n");
                return -ENOSYS;
        }

        data->clk_master = devm_clk_get(dev, "master");
        if (!IS_ERR(data->clk_master)) {
                ret = clk_prepare(data->clk_master);
                if (ret) {
                        dev_err(dev, "Failed to prepare master's clk\n");
                        return ret;
                }
        } else {
                data->clk_master = NULL;
        }

        data->sysmmu = dev;
        spin_lock_init(&data->lock);

        platform_set_drvdata(pdev, data);

        __sysmmu_get_version(data);
        if (PG_ENT_SHIFT < 0) {
                if (MMU_MAJ_VER(data->version) < 5)
                        PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
                else
                        PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
        }

        pm_runtime_enable(dev);

        return 0;
}
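
/*
 * Note (added): the clock handling in probe accepts two layouts: older
 * SoCs provide a single "sysmmu" gate clock, while v5 SoCs provide
 * separate "aclk" and "pclk" clocks (presumably bus and register
 * interface clocks). Probing only fails if neither complete set exists.
 */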

#ifdef CONFIG_PM_SLEEP
static int exynos_sysmmu_suspend(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev);

        dev_dbg(dev, "suspend\n");
        if (is_sysmmu_active(data)) {
                __sysmmu_disable_nocount(data);
                pm_runtime_put(dev);
        }
        return 0;
}

static int exynos_sysmmu_resume(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev);

        dev_dbg(dev, "resume\n");
        if (is_sysmmu_active(data)) {
                pm_runtime_get_sync(dev);
                __sysmmu_enable_nocount(data);
        }
        return 0;
}
#endif

static const struct dev_pm_ops sysmmu_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
};

static const struct of_device_id sysmmu_of_match[] __initconst = {
        { .compatible = "samsung,exynos-sysmmu", },
        { },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
        .probe  = exynos_sysmmu_probe,
        .driver = {
                .name           = "exynos-sysmmu",
                .of_match_table = sysmmu_of_match,
                .pm             = &sysmmu_pm_ops,
        }
};

static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
        dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
                                DMA_TO_DEVICE);
        *ent = val;
        dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
                                   DMA_TO_DEVICE);
}
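
/*
 * Note (added): page tables live in cacheable kernel memory while the
 * SYSMMU page table walker fetches them from DRAM, so update_pte()
 * brackets every store with dma_sync_single_*() calls to push the new
 * value out of the CPU cache before the hardware can walk it.
 */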

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
        struct exynos_iommu_domain *domain;
        dma_addr_t handle;
        int i;

        /* Check if correct PTE offsets are initialized */
        BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        if (type == IOMMU_DOMAIN_DMA) {
                if (iommu_get_dma_cookie(&domain->domain) != 0)
                        goto err_pgtable;
        } else if (type != IOMMU_DOMAIN_UNMANAGED) {
                goto err_pgtable;
        }

        domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!domain->pgtable)
                goto err_dma_cookie;

        domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!domain->lv2entcnt)
                goto err_counter;

        /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
        for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
                domain->pgtable[i + 0] = ZERO_LV2LINK;
                domain->pgtable[i + 1] = ZERO_LV2LINK;
                domain->pgtable[i + 2] = ZERO_LV2LINK;
                domain->pgtable[i + 3] = ZERO_LV2LINK;
                domain->pgtable[i + 4] = ZERO_LV2LINK;
                domain->pgtable[i + 5] = ZERO_LV2LINK;
                domain->pgtable[i + 6] = ZERO_LV2LINK;
                domain->pgtable[i + 7] = ZERO_LV2LINK;
        }

        handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
                                DMA_TO_DEVICE);
        /* For mapping page table entries we rely on dma == phys */
        BUG_ON(handle != virt_to_phys(domain->pgtable));

        spin_lock_init(&domain->lock);
        spin_lock_init(&domain->pgtablelock);
        INIT_LIST_HEAD(&domain->clients);

        domain->domain.geometry.aperture_start = 0;
        domain->domain.geometry.aperture_end   = ~0UL;
        domain->domain.geometry.force_aperture = true;

        return &domain->domain;

err_counter:
        free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
        if (type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&domain->domain);
err_pgtable:
        kfree(domain);
        return NULL;
}
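
/*
 * Sizing note (added): the lv1 table holds 4096 4-byte entries (16KiB),
 * hence the order-2 allocation above; lv2entcnt needs
 * 4096 * sizeof(short) == 8KiB, hence order 1. Each counter is zeroed
 * here and set to NUM_LV2ENTRIES (256) once its lv2 table is allocated.
 */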

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        struct sysmmu_drvdata *data, *next;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&domain->clients));

        spin_lock_irqsave(&domain->lock, flags);

        list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
                if (__sysmmu_disable(data))
                        data->master = NULL;
                list_del_init(&data->domain_node);
        }

        spin_unlock_irqrestore(&domain->lock, flags);

        if (iommu_domain->type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(iommu_domain);

        dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
                         DMA_TO_DEVICE);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(domain->pgtable + i)) {
                        phys_addr_t base = lv2table_base(domain->pgtable + i);

                        dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
                                         DMA_TO_DEVICE);
                        kmem_cache_free(lv2table_kmem_cache,
                                        phys_to_virt(base));
                }

        free_pages((unsigned long)domain->pgtable, 2);
        free_pages((unsigned long)domain->lv2entcnt, 1);
        kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
                                       struct device *dev)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        struct sysmmu_drvdata *data, *next;
        unsigned long flags;
        bool found = false;

        if (!has_sysmmu(dev) || owner->domain != iommu_domain)
                return;

        spin_lock_irqsave(&domain->lock, flags);
        list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
                if (data->master == dev) {
                        if (__sysmmu_disable(data)) {
                                data->master = NULL;
                                list_del_init(&data->domain_node);
                        }
                        pm_runtime_put(data->sysmmu);
                        found = true;
                }
        }
        spin_unlock_irqrestore(&domain->lock, flags);

        owner->domain = NULL;

        if (found)
                dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
                        __func__, &pagetable);
        else
                dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
                                      struct device *dev)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        struct sysmmu_drvdata *data;
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        unsigned long flags;
        int ret = -ENODEV;

        if (!has_sysmmu(dev))
                return -ENODEV;

        if (owner->domain)
                exynos_iommu_detach_device(owner->domain, dev);

        list_for_each_entry(data, &owner->controllers, owner_node) {
                pm_runtime_get_sync(data->sysmmu);
                ret = __sysmmu_enable(data, pagetable, domain);
                if (ret >= 0) {
                        data->master = dev;

                        spin_lock_irqsave(&domain->lock, flags);
                        list_add_tail(&data->domain_node, &domain->clients);
                        spin_unlock_irqrestore(&domain->lock, flags);
                }
        }

        if (ret < 0) {
                dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
                        __func__, &pagetable);
                return ret;
        }

        owner->domain = iommu_domain;
        dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
                __func__, &pagetable, (ret == 0) ? "" : ", again");

        return ret;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
                sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
                return ERR_PTR(-EADDRINUSE);
        }

        if (lv1ent_fault(sent)) {
                sysmmu_pte_t *pent;
                bool need_flush_flpd_cache = lv1ent_zero(sent);

                pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
                BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
                if (!pent)
                        return ERR_PTR(-ENOMEM);

                update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
                dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);

                /*
                 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
                 * FLPD cache may cache the address of zero_l2_table. This
                 * function replaces the zero_l2_table with new L2 page table
                 * to write valid mappings.
                 * Accessing the valid area may cause page fault since FLPD
                 * cache may still cache zero_l2_table for the valid area
                 * instead of new L2 page table that has the mapping
                 * information of the valid area.
                 * Thus any replacement of zero_l2_table with other valid L2
                 * page table must involve FLPD cache invalidation for System
                 * MMU v3.3.
                 * FLPD cache invalidation is performed with TLB invalidation
                 * by VPN without blocking. It is safe to invalidate TLB without
                 * blocking because the target address of TLB invalidation is
                 * not currently mapped.
                 */
                if (need_flush_flpd_cache) {
                        struct sysmmu_drvdata *data;

                        spin_lock(&domain->lock);
                        list_for_each_entry(data, &domain->clients, domain_node)
                                sysmmu_tlb_invalidate_flpdcache(data, iova);
                        spin_unlock(&domain->lock);
                }
        }

        return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
                          sysmmu_pte_t *sent, sysmmu_iova_t iova,
                          phys_addr_t paddr, short *pgcnt)
{
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                        iova);
                return -EADDRINUSE;
        }

        if (lv1ent_page(sent)) {
                if (*pgcnt != NUM_LV2ENTRIES) {
                        WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                                iova);
                        return -EADDRINUSE;
                }

                kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
                *pgcnt = 0;
        }

        update_pte(sent, mk_lv1ent_sect(paddr));

        spin_lock(&domain->lock);
        if (lv1ent_page_zero(sent)) {
                struct sysmmu_drvdata *data;
                /*
                 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
                 * entry by speculative prefetch of SLPD which has no mapping.
                 */
                list_for_each_entry(data, &domain->clients, domain_node)
                        sysmmu_tlb_invalidate_flpdcache(data, iova);
        }
        spin_unlock(&domain->lock);

        return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
                       short *pgcnt)
{
        if (size == SPAGE_SIZE) {
                if (WARN_ON(!lv2ent_fault(pent)))
                        return -EADDRINUSE;

                update_pte(pent, mk_lv2ent_spage(paddr));
                *pgcnt -= 1;
        } else { /* size == LPAGE_SIZE */
                int i;
                dma_addr_t pent_base = virt_to_phys(pent);

                dma_sync_single_for_cpu(dma_dev, pent_base,
                                        sizeof(*pent) * SPAGES_PER_LPAGE,
                                        DMA_TO_DEVICE);
                for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
                        if (WARN_ON(!lv2ent_fault(pent))) {
                                if (i > 0)
                                        memset(pent - i, 0, sizeof(*pent) * i);
                                return -EADDRINUSE;
                        }

                        *pent = mk_lv2ent_lpage(paddr);
                }
                dma_sync_single_for_device(dma_dev, pent_base,
                                           sizeof(*pent) * SPAGES_PER_LPAGE,
                                           DMA_TO_DEVICE);
                *pgcnt -= SPAGES_PER_LPAGE;
        }

        return 0;
}
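
/*
 * Accounting example (added): a fresh lv2 table starts with
 * *pgcnt == 256 free slots. Mapping a 4KiB page consumes one slot and a
 * 64KiB large page consumes SPAGES_PER_LPAGE == 16, since it occupies
 * 16 consecutive entries; exynos_iommu_unmap() adds the same amounts
 * back when the mapping is removed.
 */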

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic that improves address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: once a faulty (unmapped) page table entry
 * has been cached, System MMU keeps reporting a page fault when that entry
 * is hit, even after the entry has been updated to a valid one. To prevent
 * caching faulty page table entries which may be updated to valid entries
 * later, the virtual memory manager must apply the following workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, the start address of an I/O virtual region must be aligned to
 * the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
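
/*
 * Placement example (added; addresses are hypothetical): under the v3.3
 * rules above, if one region ends at 0x10000000 the next region may
 * start at 0x10020000 (128KiB-aligned, leaving a 128KiB hole), but not
 * at 0x10010000, which would leave only a 64KiB hole.
 */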

static int exynos_iommu_map(struct iommu_domain *iommu_domain,
                            unsigned long l_iova, phys_addr_t paddr,
                            size_t size, int prot)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_pte_t *entry;
        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
        unsigned long flags;
        int ret = -ENOMEM;

        BUG_ON(domain->pgtable == NULL);

        spin_lock_irqsave(&domain->pgtablelock, flags);

        entry = section_entry(domain->pgtable, iova);

        if (size == SECT_SIZE) {
                ret = lv1set_section(domain, entry, iova, paddr,
                                     &domain->lv2entcnt[lv1ent_offset(iova)]);
        } else {
                sysmmu_pte_t *pent;

                pent = alloc_lv2entry(domain, entry, iova,
                                      &domain->lv2entcnt[lv1ent_offset(iova)]);

                if (IS_ERR(pent))
                        ret = PTR_ERR(pent);
                else
                        ret = lv2set_page(pent, paddr, size,
                                      &domain->lv2entcnt[lv1ent_offset(iova)]);
        }

        if (ret)
                pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
                        __func__, ret, size, iova);

        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
                                              sysmmu_iova_t iova, size_t size)
{
        struct sysmmu_drvdata *data;
        unsigned long flags;

        spin_lock_irqsave(&domain->lock, flags);

        list_for_each_entry(data, &domain->clients, domain_node)
                sysmmu_tlb_invalidate_entry(data, iova, size);

        spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
                                 unsigned long l_iova, size_t size)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
        sysmmu_pte_t *ent;
        size_t err_pgsize;
        unsigned long flags;

        BUG_ON(domain->pgtable == NULL);

        spin_lock_irqsave(&domain->pgtablelock, flags);

        ent = section_entry(domain->pgtable, iova);

        if (lv1ent_section(ent)) {
                if (WARN_ON(size < SECT_SIZE)) {
                        err_pgsize = SECT_SIZE;
                        goto err;
                }

                /* workaround for h/w bug in System MMU v3.3 */
                update_pte(ent, ZERO_LV2LINK);
                size = SECT_SIZE;
                goto done;
        }

        if (unlikely(lv1ent_fault(ent))) {
                if (size > SECT_SIZE)
                        size = SECT_SIZE;
                goto done;
        }

        /* lv1ent_page(ent) == true here */

        ent = page_entry(ent, iova);

        if (unlikely(lv2ent_fault(ent))) {
                size = SPAGE_SIZE;
                goto done;
        }

        if (lv2ent_small(ent)) {
                update_pte(ent, 0);
                size = SPAGE_SIZE;
                domain->lv2entcnt[lv1ent_offset(iova)] += 1;
                goto done;
        }

        /* lv2ent_large(ent) == true here */
        if (WARN_ON(size < LPAGE_SIZE)) {
                err_pgsize = LPAGE_SIZE;
                goto err;
        }

        dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
                                sizeof(*ent) * SPAGES_PER_LPAGE,
                                DMA_TO_DEVICE);
        memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
        dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
                                   sizeof(*ent) * SPAGES_PER_LPAGE,
                                   DMA_TO_DEVICE);
        size = LPAGE_SIZE;
        domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        exynos_iommu_tlb_invalidate_entry(domain, iova, size);

        return size;
err:
        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
                __func__, size, iova, err_pgsize);

        return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
                                             dma_addr_t iova)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_pte_t *entry;
        unsigned long flags;
        phys_addr_t phys = 0;

        spin_lock_irqsave(&domain->pgtablelock, flags);

        entry = section_entry(domain->pgtable, iova);

        if (lv1ent_section(entry)) {
                phys = section_phys(entry) + section_offs(iova);
        } else if (lv1ent_page(entry)) {
                entry = page_entry(entry, iova);

                if (lv2ent_large(entry))
                        phys = lpage_phys(entry) + lpage_offs(iova);
                else if (lv2ent_small(entry))
                        phys = spage_phys(entry) + spage_offs(iova);
        }

        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        return phys;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_get(dev);
        if (!group)
                group = iommu_group_alloc();

        return group;
}

static int exynos_iommu_add_device(struct device *dev)
{
        struct iommu_group *group;

        if (!has_sysmmu(dev))
                return -ENODEV;

        group = iommu_group_get_for_dev(dev);

        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        return 0;
}

static void exynos_iommu_remove_device(struct device *dev)
{
        if (!has_sysmmu(dev))
                return;

        iommu_group_remove_device(dev);
}

static int exynos_iommu_of_xlate(struct device *dev,
                                 struct of_phandle_args *spec)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct platform_device *sysmmu = of_find_device_by_node(spec->np);
        struct sysmmu_drvdata *data;

        if (!sysmmu)
                return -ENODEV;

        data = platform_get_drvdata(sysmmu);
        if (!data)
                return -ENODEV;

        if (!owner) {
                owner = kzalloc(sizeof(*owner), GFP_KERNEL);
                if (!owner)
                        return -ENOMEM;

                INIT_LIST_HEAD(&owner->controllers);
                dev->archdata.iommu = owner;
        }

        list_add_tail(&data->owner_node, &owner->controllers);
        return 0;
}
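
/*
 * Device tree example (added; node names and addresses are hypothetical,
 * the compatible string is not):
 *
 *     sysmmu_fimd: sysmmu@14640000 {
 *             compatible = "samsung,exynos-sysmmu";
 *             ...
 *     };
 *
 *     fimd@14400000 {
 *             iommus = <&sysmmu_fimd>;
 *     };
 *
 * of_xlate runs once per "iommus" phandle, so a master with several
 * controllers ends up with several entries on owner->controllers.
 */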

static struct iommu_ops exynos_iommu_ops = {
        .domain_alloc = exynos_iommu_domain_alloc,
        .domain_free = exynos_iommu_domain_free,
        .attach_dev = exynos_iommu_attach_device,
        .detach_dev = exynos_iommu_detach_device,
        .map = exynos_iommu_map,
        .unmap = exynos_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = exynos_iommu_iova_to_phys,
        .device_group = get_device_iommu_group,
        .add_device = exynos_iommu_add_device,
        .remove_device = exynos_iommu_remove_device,
        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
        .of_xlate = exynos_iommu_of_xlate,
};

static bool init_done;

static int __init exynos_iommu_init(void)
{
        int ret;

        lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
                                LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
        if (!lv2table_kmem_cache) {
                pr_err("%s: Failed to create kmem cache\n", __func__);
                return -ENOMEM;
        }
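
        /*
         * Note (added): the cache uses LV2TABLE_SIZE (1KiB) as both object
         * size and alignment, so every lv2 table is naturally aligned;
         * alloc_lv2entry() relies on this in its BUG_ON() alignment check.
         */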

        ret = platform_driver_register(&exynos_sysmmu_driver);
        if (ret) {
                pr_err("%s: Failed to register driver\n", __func__);
                goto err_reg_driver;
        }

        zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
        if (zero_lv2_table == NULL) {
                pr_err("%s: Failed to allocate zero level2 page table\n",
                       __func__);
                ret = -ENOMEM;
                goto err_zero_lv2;
        }

        ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
        if (ret) {
                pr_err("%s: Failed to register exynos-iommu driver.\n",
                       __func__);
                goto err_set_iommu;
        }

        init_done = true;

        return 0;
err_set_iommu:
        kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
        platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
        kmem_cache_destroy(lv2table_kmem_cache);
        return ret;
}

static int __init exynos_iommu_of_setup(struct device_node *np)
{
        struct platform_device *pdev;

        if (!init_done)
                exynos_iommu_init();

        pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);

        /*
         * use the first registered sysmmu device for performing
         * dma mapping operations on iommu page tables (cpu cache flush)
         */
        if (!dma_dev)
                dma_dev = &pdev->dev;

        of_iommu_set_ops(np, &exynos_iommu_ops);
        return 0;
}

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
                 exynos_iommu_of_setup);