/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
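/*
 * Worked example (illustrative numbers, not from the original source):
 * with a 4K granule, pg_shift = 12 and bits_per_level = 9, so a
 * four-level walk uses shifts of 39, 30, 21 and 12 for levels 0-3,
 * matching the VMSAv8-64 translation regime.
 */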

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
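/*
 * For example (again assuming a 4K granule with four levels), the
 * level-3 index of an IOVA is bits [20:12] and the level-2 index is
 * bits [29:21]; ARM_LPAE_PGD_IDX() only widens the index mask at the
 * start level, where concatenated stage-2 PGD pages may be in use.
 */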

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
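/*
 * With a 4K granule (bits_per_level = 9, 8-byte PTEs) this yields the
 * familiar sizes: 4K pages at level 3, 2M blocks at level 2 and 1G
 * blocks at level 1 (illustrative values, assuming that configuration).
 */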

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d)						\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)		\
	& ~((1ULL << (d)->pg_shift) - 1)))

#define iopte_type(pte,l)						\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)						\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?				\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :		\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)						\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)						\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;
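/*
 * Worked sizing example (illustrative, not from the original source):
 * for a 48-bit IAS with a 4K granule, pg_shift = 12, bits_per_level = 9,
 * levels = DIV_ROUND_UP(36, 9) = 4 and pgd_size = 4K, i.e. a single
 * top-level page.
 */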

static bool selftest_running = false;

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	/* We require an unmap first */
	if (iopte_leaf(*ptep, lvl)) {
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	*ptep = pte;
	data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
	return 0;
}

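/*
 * Recursive map helper: descend from the start level, installing a leaf
 * entry once the requested size matches the block size at the current
 * level. For example (assuming a 4K granule), a 2M mapping terminates
 * at level 2 with a block PTE rather than allocating a level-3 table.
 */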
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	void *cookie = data->iop.cookie;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = alloc_pages_exact(1UL << data->pg_shift,
					  GFP_ATOMIC | __GFP_ZERO);
		if (!cptep)
			return -ENOMEM;

		data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
						 cookie);
		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		*ptep = pte;
		data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		return;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = 1UL << data->pg_shift;

	start = ptep;
	end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	free_pages_exact(start, table_size);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

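/*
 * Split a block mapping so that part of it can be unmapped: a new
 * next-level table is built that re-maps everything in the block except
 * the [iova, iova + size) hole, then the block PTE is replaced with a
 * table PTE pointing at it.
 */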
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	void *cookie = data->iop.cookie;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Leave a hole where the region being unmapped was */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	*ptep = table;
	tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	iova &= ~(blk_size - 1);
	tlb->tlb_add_flush(iova, blk_size, true, cookie);
	return size;
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		*ptep = 0;
		tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, false, cookie);
			tlb->tlb_sync(data->iop.cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= ((1 << data->pg_shift) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte, data) << data->pg_shift) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
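/*
 * For instance, a caller passing pgsize_bitmap = SZ_4K | SZ_64K | SZ_2M
 * on a host with a 4K PAGE_SIZE would be restricted to SZ_4K | SZ_2M,
 * since the 4K granule is chosen and 64K is not a valid page or block
 * size for it (illustrative input, not taken from the original source).
 */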

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
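	/*
	 * Example (assumed inputs): ias = 40 with a 4K granule gives
	 * va_bits = 28, levels = 4 and pgd_bits = 1, i.e. a two-entry
	 * pgd before any stage-2 concatenation, which is applied later
	 * in arm_64_lpae_alloc_pgtable_s2().
	 */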

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
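	/*
	 * SL0 is encoded as an inverted start level; the increment of sl
	 * above accounts for the 4K granule using a different encoding
	 * from 16K/64K (see the VTCR_EL2 description in the Arm ARM).
	 */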
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
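
/*
 * Typical usage from an IOMMU driver (an illustrative sketch, modelled
 * on the selftest below; "my_tlb_ops", "cookie", "iova" and "paddr" are
 * assumed to be supplied by the caller):
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.tlb		= &my_tlb_ops,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops)
 *		ops->map(ops, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE);
 */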

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
	.flush_pgtable	= dummy_flush_pgtable,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif