Commit | Line | Data |
---|---|---|
0720d1f0 SM |
1 | /* Copyright (c) 2010, Code Aurora Forum. All rights reserved. |
2 | * | |
3 | * This program is free software; you can redistribute it and/or modify | |
4 | * it under the terms of the GNU General Public License version 2 and | |
5 | * only version 2 as published by the Free Software Foundation. | |
6 | * | |
7 | * This program is distributed in the hope that it will be useful, | |
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | * GNU General Public License for more details. | |
11 | * | |
12 | * You should have received a copy of the GNU General Public License | |
13 | * along with this program; if not, write to the Free Software | |
14 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | |
15 | * 02110-1301, USA. | |
16 | */ | |
17 | ||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
19 | #include <linux/kernel.h> | |
20 | #include <linux/module.h> | |
21 | #include <linux/platform_device.h> | |
22 | #include <linux/errno.h> | |
23 | #include <linux/io.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/list.h> | |
26 | #include <linux/spinlock.h> | |
27 | #include <linux/slab.h> | |
28 | #include <linux/iommu.h> | |
29 | ||
30 | #include <asm/cacheflush.h> | |
31 | #include <asm/sizes.h> | |
32 | ||
33 | #include <mach/iommu_hw-8xxx.h> | |
34 | #include <mach/iommu.h> | |
35 | ||
100832c9 SM |
/*
 * Read a coprocessor register into 'reg' via the ARM "mrc" instruction.
 * Used below with p15/c10 to fetch the TEX remap registers.
 */
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* Read the CPU's TEX remap configuration: PRRR and NMRR (CP15 c10) */
#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
/* Maps MSM_IOMMU_ATTR_* cache attributes to hardware TEX class indices;
 * filled in at init time by setup_iommu_tex_classes().  An entry may be
 * negative (-ENODEV) if no matching TEX class was found. */
static int msm_iommu_tex_class[4];

/* Serializes all page-table manipulation and IOMMU register access */
DEFINE_SPINLOCK(msm_iommu_lock);

/* Per-domain private data */
struct msm_priv {
	unsigned long *pgtable;		/* 16 KB first-level page table */
	struct list_head list_attached;	/* attached msm_iommu_ctx_drvdata */
};
52 | ||
/*
 * Invalidate the TLB of every context attached to 'domain'.
 *
 * When the IOMMU cannot snoop page tables from L2
 * (!CONFIG_IOMMU_PGTABLES_L2), first clean/flush the first-level table
 * and every referenced second-level table out of the CPU caches so the
 * hardware table walker sees the latest entries.
 *
 * Caller must hold msm_iommu_lock.
 */
static void __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;

#ifndef CONFIG_IOMMU_PGTABLES_L2
	unsigned long *fl_table = priv->pgtable;
	int i;

	/* Only bother flushing tables if some context can walk them */
	if (!list_empty(&priv->list_attached)) {
		dmac_flush_range(fl_table, fl_table + SZ_16K);

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
				void *sl_table = __va(fl_table[i] &
							FL_BASE_MASK);
				dmac_flush_range(sl_table, sl_table + SZ_4K);
			}
	}
#endif

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
	}
}
83 | ||
/*
 * Zero every per-context register of context bank 'ctx', leaving the
 * context disabled (SCTLR cleared) with its TLB invalidated
 * (CTX_TLBIALL written).  Gives __program_context() a known-clean
 * starting state and quiesces a context on detach.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	SET_CONTEXTIDR(base, ctx, 0);
}
108 | ||
/*
 * Program context bank 'ctx' to translate through the first-level page
 * table at physical address 'pgtable': enable hardware table walk,
 * mirror the CPU's TEX remap registers into the context, configure
 * fault reporting, optionally mark table walks inner-cacheable, and
 * finally set the MMU-enable bit.
 *
 * 'pgtable' must be 16 KB aligned (only bits above bit 14 are
 * programmed into TTBR0).
 */
static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
	unsigned int prrr, nmrr;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> 14));

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes to match the CPU's PRRR/NMRR */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}
176 | ||
177 | static int msm_iommu_domain_init(struct iommu_domain *domain) | |
178 | { | |
179 | struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL); | |
180 | ||
181 | if (!priv) | |
182 | goto fail_nomem; | |
183 | ||
184 | INIT_LIST_HEAD(&priv->list_attached); | |
185 | priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL, | |
186 | get_order(SZ_16K)); | |
187 | ||
188 | if (!priv->pgtable) | |
189 | goto fail_nomem; | |
190 | ||
191 | memset(priv->pgtable, 0, SZ_16K); | |
192 | domain->priv = priv; | |
193 | return 0; | |
194 | ||
195 | fail_nomem: | |
196 | kfree(priv); | |
197 | return -ENOMEM; | |
198 | } | |
199 | ||
/*
 * Tear down a domain: free every second-level table still referenced
 * from the first-level table, then the first-level table itself, then
 * the private data.  Tolerates a NULL domain->priv.  All of this runs
 * under msm_iommu_lock with interrupts disabled.
 */
static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	int i;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		/* Free each second-level table pointed to by a
		 * FL_TYPE_TABLE first-level entry */
		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
						FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
226 | ||
/*
 * Attach a context device to a domain: program the device's context
 * bank to walk the domain's page table, record the attachment on the
 * domain's list and invalidate the TLB.
 *
 * Returns 0 on success, -EINVAL for a missing domain/device/drvdata,
 * or -EBUSY if the context is already attached.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* Non-empty attached_elm means the context is already on some
	 * domain's attach list */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* Guard against attaching the same context to this domain twice */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	__program_context(iommu_drvdata->base, ctx_dev->num,
			  __pa(priv->pgtable));

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	__flush_iotlb(domain);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
276 | ||
/*
 * Detach a context device from its domain: flush the TLB, reset the
 * context bank (which disables translation for it) and remove the
 * device from the domain's attach list.  Returns silently on invalid
 * input -- the iommu_ops detach callback has no way to report errors.
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	/* Flush while the context can still walk the tables, then kill it */
	__flush_iotlb(domain);
	__reset_context(iommu_drvdata->base, ctx_dev->num);
	/* list_del_init so a future attach sees an empty attached_elm */
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
306 | ||
307 | static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, | |
308 | phys_addr_t pa, int order, int prot) | |
309 | { | |
310 | struct msm_priv *priv; | |
311 | unsigned long flags; | |
312 | unsigned long *fl_table; | |
313 | unsigned long *fl_pte; | |
314 | unsigned long fl_offset; | |
315 | unsigned long *sl_table; | |
316 | unsigned long *sl_pte; | |
317 | unsigned long sl_offset; | |
100832c9 | 318 | unsigned int pgprot; |
0720d1f0 | 319 | size_t len = 0x1000UL << order; |
100832c9 | 320 | int ret = 0, tex, sh; |
0720d1f0 SM |
321 | |
322 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
0720d1f0 | 323 | |
100832c9 SM |
324 | sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0; |
325 | tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK]; | |
326 | ||
327 | if (tex < 0 || tex > NUM_TEX_CLASS - 1) { | |
328 | ret = -EINVAL; | |
329 | goto fail; | |
330 | } | |
331 | ||
332 | priv = domain->priv; | |
0720d1f0 SM |
333 | if (!priv) { |
334 | ret = -EINVAL; | |
335 | goto fail; | |
336 | } | |
337 | ||
338 | fl_table = priv->pgtable; | |
339 | ||
340 | if (len != SZ_16M && len != SZ_1M && | |
341 | len != SZ_64K && len != SZ_4K) { | |
342 | pr_debug("Bad size: %d\n", len); | |
343 | ret = -EINVAL; | |
344 | goto fail; | |
345 | } | |
346 | ||
347 | if (!fl_table) { | |
348 | pr_debug("Null page table\n"); | |
349 | ret = -EINVAL; | |
350 | goto fail; | |
351 | } | |
352 | ||
100832c9 SM |
353 | if (len == SZ_16M || len == SZ_1M) { |
354 | pgprot = sh ? FL_SHARED : 0; | |
355 | pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0; | |
356 | pgprot |= tex & 0x02 ? FL_CACHEABLE : 0; | |
357 | pgprot |= tex & 0x04 ? FL_TEX0 : 0; | |
358 | } else { | |
359 | pgprot = sh ? SL_SHARED : 0; | |
360 | pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0; | |
361 | pgprot |= tex & 0x02 ? SL_CACHEABLE : 0; | |
362 | pgprot |= tex & 0x04 ? SL_TEX0 : 0; | |
363 | } | |
364 | ||
0720d1f0 SM |
365 | fl_offset = FL_OFFSET(va); /* Upper 12 bits */ |
366 | fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ | |
367 | ||
368 | if (len == SZ_16M) { | |
369 | int i = 0; | |
370 | for (i = 0; i < 16; i++) | |
371 | *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION | | |
372 | FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT | | |
100832c9 | 373 | FL_SHARED | pgprot; |
0720d1f0 SM |
374 | } |
375 | ||
376 | if (len == SZ_1M) | |
377 | *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | | |
100832c9 | 378 | FL_TYPE_SECT | FL_SHARED | pgprot; |
0720d1f0 SM |
379 | |
380 | /* Need a 2nd level table */ | |
381 | if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) { | |
382 | unsigned long *sl; | |
383 | sl = (unsigned long *) __get_free_pages(GFP_KERNEL, | |
384 | get_order(SZ_4K)); | |
385 | ||
386 | if (!sl) { | |
387 | pr_debug("Could not allocate second level table\n"); | |
388 | ret = -ENOMEM; | |
389 | goto fail; | |
390 | } | |
391 | ||
392 | memset(sl, 0, SZ_4K); | |
393 | *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE); | |
394 | } | |
395 | ||
396 | sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); | |
397 | sl_offset = SL_OFFSET(va); | |
398 | sl_pte = sl_table + sl_offset; | |
399 | ||
400 | ||
401 | if (len == SZ_4K) | |
402 | *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | | |
100832c9 | 403 | SL_SHARED | SL_TYPE_SMALL | pgprot; |
0720d1f0 SM |
404 | |
405 | if (len == SZ_64K) { | |
406 | int i; | |
407 | ||
408 | for (i = 0; i < 16; i++) | |
409 | *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 | | |
100832c9 | 410 | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot; |
0720d1f0 SM |
411 | } |
412 | ||
413 | __flush_iotlb(domain); | |
414 | fail: | |
415 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
416 | return ret; | |
417 | } | |
418 | ||
419 | static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, | |
420 | int order) | |
421 | { | |
422 | struct msm_priv *priv; | |
423 | unsigned long flags; | |
424 | unsigned long *fl_table; | |
425 | unsigned long *fl_pte; | |
426 | unsigned long fl_offset; | |
427 | unsigned long *sl_table; | |
428 | unsigned long *sl_pte; | |
429 | unsigned long sl_offset; | |
430 | size_t len = 0x1000UL << order; | |
431 | int i, ret = 0; | |
432 | ||
433 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
434 | ||
435 | priv = domain->priv; | |
436 | ||
437 | if (!priv) { | |
438 | ret = -ENODEV; | |
439 | goto fail; | |
440 | } | |
441 | ||
442 | fl_table = priv->pgtable; | |
443 | ||
444 | if (len != SZ_16M && len != SZ_1M && | |
445 | len != SZ_64K && len != SZ_4K) { | |
446 | pr_debug("Bad length: %d\n", len); | |
447 | ret = -EINVAL; | |
448 | goto fail; | |
449 | } | |
450 | ||
451 | if (!fl_table) { | |
452 | pr_debug("Null page table\n"); | |
453 | ret = -EINVAL; | |
454 | goto fail; | |
455 | } | |
456 | ||
457 | fl_offset = FL_OFFSET(va); /* Upper 12 bits */ | |
458 | fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ | |
459 | ||
460 | if (*fl_pte == 0) { | |
461 | pr_debug("First level PTE is 0\n"); | |
462 | ret = -ENODEV; | |
463 | goto fail; | |
464 | } | |
465 | ||
466 | /* Unmap supersection */ | |
467 | if (len == SZ_16M) | |
468 | for (i = 0; i < 16; i++) | |
469 | *(fl_pte+i) = 0; | |
470 | ||
471 | if (len == SZ_1M) | |
472 | *fl_pte = 0; | |
473 | ||
474 | sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); | |
475 | sl_offset = SL_OFFSET(va); | |
476 | sl_pte = sl_table + sl_offset; | |
477 | ||
478 | if (len == SZ_64K) { | |
479 | for (i = 0; i < 16; i++) | |
480 | *(sl_pte+i) = 0; | |
481 | } | |
482 | ||
483 | if (len == SZ_4K) | |
484 | *sl_pte = 0; | |
485 | ||
486 | if (len == SZ_4K || len == SZ_64K) { | |
487 | int used = 0; | |
488 | ||
489 | for (i = 0; i < NUM_SL_PTE; i++) | |
490 | if (sl_table[i]) | |
491 | used = 1; | |
492 | if (!used) { | |
493 | free_page((unsigned long)sl_table); | |
494 | *fl_pte = 0; | |
495 | } | |
496 | } | |
497 | ||
498 | __flush_iotlb(domain); | |
499 | fail: | |
500 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
501 | return ret; | |
502 | } | |
503 | ||
/*
 * Translate an IOVA to a physical address by issuing a hardware V2P
 * (virtual-to-physical) lookup through the first context attached to
 * the domain.  Returns 0 if no context is attached or the lookup
 * faults.
 *
 * NOTE(review): domain->priv is dereferenced without a NULL check,
 * unlike the map/unmap paths -- confirm callers guarantee a valid
 * domain here.
 */
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	unsigned long flags;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	/* Use the first attached context to perform the lookup */
	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(base, ctx, 0);
	/* Kick off the privileged-read translation of 'va' */
	SET_V2PPR_VA(base, ctx, va >> V2Pxx_VA_SHIFT);

	if (GET_FAULT(base, ctx))
		goto fail;

	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
548 | ||
/* This driver advertises no optional IOMMU capabilities. */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	(void)domain;
	(void)cap;
	return 0;
}
554 | ||
/*
 * Dump the fault-related registers of context bank 'ctx' to the kernel
 * log, decoding each FSR status bit into its mnemonic (translation
 * fault, access flag fault, permission fault, etc.).  Called from the
 * fault IRQ handler.
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
581 | ||
/*
 * IRQ handler for IOMMU context faults.  Scans every context bank on
 * the faulting IOMMU instance, dumps the fault registers of any bank
 * with a non-zero FSR, then writes the FSR to clear the recorded fault
 * so later faults can be reported.
 *
 * NOTE(review): always returns 0 (IRQ_NONE), even when a fault was
 * found and cleared -- confirm this is intentional.
 */
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_drvdata *drvdata = dev_id;
	void __iomem *base;
	unsigned int fsr = 0;
	int ncb = 0, i = 0;

	spin_lock(&msm_iommu_lock);

	if (!drvdata) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	base = drvdata->base;

	pr_err("===== WOAH! =====\n");
	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int) base);

	/* GET_NCB(base) + 1 context banks to scan */
	ncb = GET_NCB(base)+1;
	for (i = 0; i < ncb; i++) {
		fsr = GET_FSR(base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, i);
			/* Write back to clear the fault status */
			SET_FSR(base, i, 0x4000000F);
		}
	}
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}
616 | ||
/* Operations registered with the IOMMU core for MSM IOMMU domains */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap
};
627 | ||
100832c9 SM |
628 | static int __init get_tex_class(int icp, int ocp, int mt, int nos) |
629 | { | |
630 | int i = 0; | |
631 | unsigned int prrr = 0; | |
632 | unsigned int nmrr = 0; | |
633 | int c_icp, c_ocp, c_mt, c_nos; | |
634 | ||
635 | RCP15_PRRR(prrr); | |
636 | RCP15_NMRR(nmrr); | |
637 | ||
638 | for (i = 0; i < NUM_TEX_CLASS; i++) { | |
639 | c_nos = PRRR_NOS(prrr, i); | |
640 | c_mt = PRRR_MT(prrr, i); | |
641 | c_icp = NMRR_ICP(nmrr, i); | |
642 | c_ocp = NMRR_OCP(nmrr, i); | |
643 | ||
644 | if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) | |
645 | return i; | |
646 | } | |
647 | ||
648 | return -ENODEV; | |
649 | } | |
650 | ||
/*
 * Resolve each MSM_IOMMU_ATTR_* caching attribute to the CPU's
 * matching TEX class index and cache it in msm_iommu_tex_class[].
 *
 * NOTE(review): get_tex_class() can return -ENODEV; the negative value
 * is stored as-is and later rejected by msm_iommu_map()'s "tex < 0"
 * check rather than reported here.
 */
static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}
665 | ||
/*
 * Driver init: populate the TEX class lookup table from the CPU's TEX
 * remap state, then register this driver's operations with the IOMMU
 * core.
 */
static int __init msm_iommu_init(void)
{
	setup_iommu_tex_classes();
	register_iommu(&msm_iommu_ops);
	return 0;
}
672 | ||
673 | subsys_initcall(msm_iommu_init); | |
674 | ||
675 | MODULE_LICENSE("GPL v2"); | |
676 | MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>"); |