/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4
/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
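/*
 * For illustration: with a 4KB granule and a four-level walk
 * (pg_shift == 12, bits_per_level == 9, levels == 4), the macro above
 * gives shifts of 39, 30, 21 and 12 for levels 0-3, matching the usual
 * AArch64 4KB translation regime.
 */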
#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
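/*
 * For illustration: with sizeof(arm_lpae_iopte) == 8 and a 4KB granule
 * (bits_per_level == 9), this evaluates to 4KB at level 3, 2MB at level 2
 * and 1GB at level 1 - the block/page sizes advertised in pgsize_bitmap.
 */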
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~((1ULL << (d)->pg_shift) - 1)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
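/*
 * For illustration: with pg_shift == 12, pfn_to_iopte(0x12345, d) places
 * the output address 0x12345000 in bits [47:12] of the descriptor, and
 * iopte_to_pfn() recovers 0x12345 from it; attribute bits above the
 * 48-bit address mask are untouched.
 */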
struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;
static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
{
	return phys_to_dma(dev, virt_to_phys(pages));
}
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests it can't by
		 * giving us back some translation, that bodes very badly...
		 */
		if (dma != __arm_lpae_dma_addr(dev, pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;

	if (!selftest_running)
		dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;

	*ptep = pte;

	if (!selftest_running)
		dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
					   sizeof(pte), DMA_TO_DEVICE);
}
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep);
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	if (iopte_leaf(*ptep, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, cfg);
	return 0;
}
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
					       GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
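/*
 * For illustration: a stage-1 mapping requested with
 * IOMMU_READ | IOMMU_CACHE comes back as
 * ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG | ARM_LPAE_PTE_AP_RDONLY with
 * AttrIndx selecting the write-back MAIR slot; arm_lpae_init_pte() then
 * adds the AF, shareability and descriptor-type bits.
 */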
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		return;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = 1UL << data->pg_shift;

	start = ptep;
	end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, cfg);
	iova &= ~(blk_size - 1);
	cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
	return size;
}
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &data->iop.cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, false, cookie);
			tlb->tlb_sync(cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}
static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= ((1 << data->pg_shift) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
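/*
 * For illustration: on a host with PAGE_SIZE == SZ_4K, a caller-supplied
 * pgsize_bitmap of (SZ_4K | SZ_64K | SZ_2M) selects the 4KB granule and
 * is restricted to (SZ_4K | SZ_2M); the 64KB bit is dropped because it
 * belongs to a different translation regime.
 */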
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
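/*
 * Worked example for the sizing above: ias == 48 with a 4KB granule gives
 * pg_shift == 12 and bits_per_level == 9, so va_bits == 36,
 * levels == DIV_ROUND_UP(36, 9) == 4, pgd_bits == 36 - 27 == 9 and
 * pgd_size == 1 << (9 + 3) == 4KB, i.e. a single-page top-level table.
 */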
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	      (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
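/*
 * Worked example for the PGD concatenation above (hypothetical numbers):
 * with ias == 40 and a 4KB granule, arm_lpae_alloc_pgtable() yields a
 * four-level walk whose top-level table holds only two entries. Two pages
 * fit within ARM_LPAE_S2_MAX_CONCAT_PAGES, so the walk is shortened to
 * start at level 1 with two concatenated level-1 tables (pgd_size == 8KB).
 */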
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
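/*
 * Typical usage sketch (not part of this file): an IOMMU driver fills in
 * a struct io_pgtable_cfg and picks one of the formats registered above
 * via the io-pgtable framework, e.g.:
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops)
 *		ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *
 * free_io_pgtable_ops() tears the tables down again through
 * arm_lpae_free_pgtable().
 */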
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}
#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}
static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);

#endif /* CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST */