arm64: move kernel image to base of vmalloc area
arch/arm64/mm/mmu.c
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

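/*
 * Choose the memory attributes for mapping physical memory into userspace
 * (e.g. via /dev/mem): non-cacheable for addresses not backed by RAM,
 * write-combining when the file was opened with O_SYNC, and the caller's
 * attributes otherwise.
 */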
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

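/*
 * Allocate a page from memblock for use as a page table and zero it via the
 * FIX_PTE fixmap slot, since the new page may not be covered by the linear
 * mapping yet. Returns the physical address of the page.
 */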
static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!phys);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * We need to use the least restrictive permissions available
		 * here; the permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

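/*
 * Create page (PTE-level) mappings for [addr, end). If the PMD entry is empty
 * or is a section mapping, a new PTE table is allocated first, splitting the
 * existing section so that its contents remain mapped.
 */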
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

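/*
 * Split a PUD block mapping into a table of PMD block mappings covering the
 * same address range with the same attributes.
 */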
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
{
	/*
	 * If debug_pagealloc is enabled we must map the linear map
	 * using pages. However, other mappings created by
	 * create_mapping_noalloc must use sections in some cases. Allow
	 * sections to be used in those cases, where no pgtable_alloc
	 * function is provided.
	 */
	return !pgtable_alloc || !debug_pagealloc_enabled();
}
#else
static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
{
	return true;
}
#endif

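/*
 * Create PMD-level mappings for [addr, end), using section (block) mappings
 * where the virtual and physical addresses are suitably aligned, and falling
 * back to page mappings via alloc_init_pte() otherwise.
 */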
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * The contents of the existing 1G block mapping need
			 * to remain present while it is split into PMD
			 * entries.
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    block_mappings_allowed(pgtable_alloc)) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

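/*
 * 1GB block mappings are only used with the 4K granule (PAGE_SHIFT == 12),
 * and only when the virtual range and physical address are all aligned to
 * PUD_SIZE.
 */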
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

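/*
 * Create PUD-level mappings for [addr, end), using 1GB block mappings where
 * use_1G_block() allows it and falling back to alloc_init_pmd() otherwise.
 * A new PUD table is allocated if the PGD entry is empty.
 */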
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    block_mappings_allowed(pgtable_alloc)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = pud_page_paddr(old_pud);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

/*
 * Create the page directory entries and any necessary page tables for the
 * given virtual-to-physical mapping.
 */
static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
		     phys_addr_t size, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(void))
{
	unsigned long addr, length, end, next;

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

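/*
 * Allocate a page table page from the page allocator once it is available;
 * PGALLOC_GFP includes __GFP_ZERO, so the page is returned already zeroed.
 */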
static phys_addr_t late_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

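/*
 * Map [phys, phys + size) at 'virt' in the given page directory, using
 * 'alloc' to allocate any intermediate page tables that are required
 * (a NULL 'alloc' means no new tables may be allocated).
 */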
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*alloc)(void))
{
	init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     NULL);
}

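/*
 * Create a mapping in the page tables of an arbitrary mm (rather than
 * init_mm), allocating new page tables from the page allocator.
 */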
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

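/*
 * Create the linear mapping for a single memblock region. The portion that
 * aliases the kernel [_stext, _etext) interval is mapped read-only so that
 * no writable alias of the kernel text exists.
 */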
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
	unsigned long kernel_start = __pa(_stext);
	unsigned long kernel_end = __pa(_etext);

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 */

	/* No overlap with the kernel text */
	if (end < kernel_start || start >= kernel_end) {
		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
				     end - start, PAGE_KERNEL,
				     early_pgtable_alloc);
		return;
	}

	/*
	 * This block overlaps the kernel text mapping.
	 * Map the portion(s) which don't overlap.
	 */
	if (start < kernel_start)
		__create_pgd_mapping(pgd, start,
				     __phys_to_virt(start),
				     kernel_start - start, PAGE_KERNEL,
				     early_pgtable_alloc);
	if (kernel_end < end)
		__create_pgd_mapping(pgd, kernel_end,
				     __phys_to_virt(kernel_end),
				     end - kernel_end, PAGE_KERNEL,
				     early_pgtable_alloc);

	/*
	 * Map the linear alias of the [_stext, _etext) interval as
	 * read-only/non-executable. This makes the contents of the
	 * region accessible to subsystems such as hibernate, but
	 * protects it from inadvertent modification or execution.
	 */
	__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
			     kernel_end - kernel_start, PAGE_KERNEL_RO,
			     early_pgtable_alloc);
}

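/*
 * Create the linear mapping for all memory regions known to memblock,
 * skipping regions marked NOMAP.
 */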
static void __init map_mem(pgd_t *pgd)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgd, start, end);
	}
}

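/*
 * With CONFIG_DEBUG_RODATA enabled, remap the [_stext, _etext) segment of
 * the kernel image read-only once booting has finished.
 */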
void mark_rodata_ro(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_RODATA))
		return;

	create_mapping_late(__pa(_stext), (unsigned long)_stext,
			    (unsigned long)_etext - (unsigned long)_stext,
			    PAGE_KERNEL_ROX);
}

void fixup_init(void)
{
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

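/*
 * Map a segment of the kernel image with the given attributes and register
 * a vm_struct for it so that the region is reserved in the vmalloc area.
 */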
static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
				    pgprot_t prot, struct vm_struct *vma)
{
	phys_addr_t pa_start = __pa(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc);

	vma->addr = va_start;
	vma->phys_addr = pa_start;
	vma->size = size;
	vma->flags = VM_MAP;
	vma->caller = __builtin_return_address(0);

	vm_area_add_early(vma);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgd)
{
	static struct vm_struct vmlinux_text, vmlinux_init, vmlinux_data;

	map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
	map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
			 &vmlinux_init);
	map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);

	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
			*pgd_offset_k(FIXADDR_START));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
			__pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgd);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	phys_addr_t pgd_phys = early_pgtable_alloc();
	pgd_t *pgd = pgd_set_fixmap(pgd_phys);

	map_kernel(pgd);
	map_mem(pgd);

	/*
	 * We want to reuse the original swapper_pg_dir so we don't have to
	 * communicate the new address to non-coherent secondaries in
	 * secondary_entry, and so cpu_switch_mm can generate the address with
	 * adrp+add rather than a load from some global variable.
	 *
	 * To do this we need to go via a temporary pgd.
	 */
	cpu_replace_ttbr1(__va(pgd_phys));
	memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
	cpu_replace_ttbr1(swapper_pg_dir);

	pgd_clear_fixmap();
	memblock_free(pgd_phys, PAGE_SIZE);

	/*
	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
	 * allocated with it.
	 */
	memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
		      SWAPPER_DIR_SIZE - PAGE_SIZE);

	bootmem_init();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset_kimg(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset_kimg(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

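/*
 * Wire up the fixmap using the statically allocated bm_pud/bm_pmd/bm_pte
 * tables so that the fixmap can be used before paging_init() has run.
 */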
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	if (CONFIG_PGTABLE_LEVELS > 3 && !pgd_none(*pgd)) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pud = pud_offset_kimg(pgd, addr);
	} else {
		pgd_populate(&init_mm, pgd, bm_pud);
		pud = fixmap_pud(addr);
	}
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = fixmap_pmd(addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

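/*
 * Install the PTE for a single fixmap slot, or clear it when 'flags' is
 * empty.
 */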
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

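/*
 * Map the device tree blob via the fixmap so that it can be parsed before
 * the linear mapping is available. Returns the virtual address of the FDT,
 * or NULL if the alignment, header or size checks fail.
 */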
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the size field of the
	 * FDT header after mapping the first chunk, double check here if that
	 * is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
				       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}

int __init arch_ioremap_pud_supported(void)
{
	/* only 4k granule supports level 1 block mappings */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
}

int __init arch_ioremap_pmd_supported(void)
{
	return 1;
}

int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	BUG_ON(phys & ~PUD_MASK);
	set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (!pud_sect(*pud))
		return 0;
	pud_clear(pud);
	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_sect(*pmd))
		return 0;
	pmd_clear(pmd);
	return 1;
}