/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4
/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0.
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)	((d)->pgd_size >> (d)->pg_shift)
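/*
 * Worked example (illustrative, not part of the original source): with a
 * 4K granule we have pg_shift = 12 and bits_per_level = 9. For a full
 * four-level table (start level 0), ARM_LPAE_LVL_SHIFT reduces to
 * ((4 - (l + 1)) * 9) + 12, i.e. shifts of 39, 30, 21 and 12 for levels
 * 0-3, matching the VMSAv8-64 4K translation regime.
 */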
/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((a) >> ARM_LPAE_LVL_SHIFT(l,d)) &				\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
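/*
 * Worked example (illustrative): with an 8-byte iopte and a 4K granule
 * (bits_per_level = 9), ARM_LPAE_BLOCK_SIZE is 1 << (3 + (4 - l) * 9):
 * 4K pages at level 3, 2M blocks at level 2 and 1G blocks at level 1.
 */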
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3
#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~((1ULL << (d)->pg_shift) - 1)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
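/*
 * Worked example (illustrative): with a 4K granule, a last-level leaf
 * for paddr 0x80000000 packs pfn_to_iopte(0x80000, d) = 0x80000000 into
 * the output-address field; arm_lpae_init_pte() below then ORs in
 * ARM_LPAE_PTE_TYPE_PAGE, ARM_LPAE_PTE_AF and ARM_LPAE_PTE_SH_IS,
 * giving 0x80000703 before any protection bits are added.
 */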
struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	/* We require an unmap first */
	if (iopte_leaf(*ptep, lvl)) {
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	*ptep = pte;
	data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
	return 0;
}
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	void *cookie = data->iop.cookie;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = alloc_pages_exact(1UL << data->pg_shift,
					  GFP_ATOMIC | __GFP_ZERO);
		if (!cptep)
			return -ENOMEM;

		data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
						 cookie);
		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		*ptep = pte;
		data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
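/*
 * Illustrative walk (not part of the original source): mapping 2M at a
 * 4K granule with four levels starts at level 0, where block_size is
 * 512G, recurses through level 1 (1G) and stops at level 2, where
 * block_size == 2M matches the requested size and arm_lpae_init_pte()
 * installs a block entry; level 3 is never touched.
 */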
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
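/*
 * Worked example (illustrative): at stage 1, IOMMU_READ | IOMMU_WRITE |
 * IOMMU_CACHE yields ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG with
 * ATTRINDX = ARM_LPAE_MAIR_ATTR_IDX_CACHE; AP_RDONLY is only set for
 * read-only mappings, and XN only when IOMMU_NOEXEC is requested.
 */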
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
}
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		return;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = 1UL << data->pg_shift;

	start = ptep;
	end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	free_pages_exact(start, table_size);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	void *cookie = data->iop.cookie;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	*ptep = table;
	tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	iova &= ~(blk_size - 1);
	tlb->tlb_add_flush(iova, blk_size, true, cookie);
	return size;
}
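/*
 * Illustrative scenario (not from the original source): unmapping one
 * 4K page from a 2M level-2 block remaps the remaining 511 pages into
 * a freshly allocated next-level table via the faked single-entry
 * table trick above, then swaps the block entry for the new table
 * descriptor and invalidates the old block TLB entry.
 */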
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		*ptep = 0;
		tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, false, cookie);
			tlb->tlb_sync(data->iop.cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}
static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= ((1 << data->pg_shift) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}
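/*
 * Worked example (illustrative): a 4K page mapped at pa 0x80001000 for
 * iova 0x1000 walks one entry per level, hits a leaf at level 3 and
 * returns (0x80001 << 12) | (0x1000 & 0xfff) = 0x80001000. Note that
 * the final mask only preserves the offset within a single granule.
 */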
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
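/*
 * Worked example (illustrative): on a host with PAGE_SIZE = 4K, a
 * bitmap of SZ_64K | SZ_512M has no bit matching PAGE_SIZE and no bits
 * below PAGE_MASK, so granule = 1UL << __ffs(...) = SZ_64K and the
 * bitmap is restricted to the 64K regime's page and block sizes.
 */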
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
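/*
 * Worked example (illustrative): ias = 48 with a 4K granule gives
 * pg_shift = 12, bits_per_level = 9, va_bits = 36 and levels = 4;
 * pgd_bits = 36 - 27 = 9, so the pgd is one 4K page of 512 entries.
 */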
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
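/*
 * Worked example (illustrative): for a 4K granule with ias = oas = 48,
 * the TCR above works out to SH0 = IS, IRGN0/ORGN0 = WBWA, TG0 = 4K,
 * IPS = 48-bit and T0SZ = 16, i.e. 0x500003510.
 */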
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	      (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
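/*
 * Worked example (illustrative): ias = 40 with a 4K granule initially
 * needs levels = 4 with a 16-byte pgd (two entries). Since pgd_pages =
 * 2 <= ARM_LPAE_S2_MAX_CONCAT_PAGES, the pgd grows to two concatenated
 * 4K pages and the stage-2 walk starts at level 1 instead of level 0.
 */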
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
	.flush_pgtable	= dummy_flush_pgtable,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}
static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif /* CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST */