arch/arm64/mm/mmu.c
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

struct cachepolicy {
        const char policy[16];
        u64 mair;
        u64 tcr;
};

static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy = "uncached",
                .mair   = 0x44,         /* inner, outer non-cacheable */
                .tcr    = TCR_IRGN_NC | TCR_ORGN_NC,
        }, {
                .policy = "writethrough",
                .mair   = 0xaa,         /* inner, outer write-through, read-allocate */
                .tcr    = TCR_IRGN_WT | TCR_ORGN_WT,
        }, {
                .policy = "writeback",
                .mair   = 0xee,         /* inner, outer write-back, read-allocate */
                .tcr    = TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
        }
};
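
/*
 * In each MAIR attribute byte above, bits[7:4] encode the outer and
 * bits[3:0] the inner cacheability of Normal memory: 0x4 is
 * Non-cacheable, 0xa is Write-Through/Read-Allocate and 0xe is
 * Write-Back/Read-Allocate, hence the 0x44/0xaa/0xee values.
 */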

/*
 * These are useful for identifying cache coherency problems by allowing the
 * cache or the cache and writebuffer to be turned off. It changes the Normal
 * memory caching attributes in the MAIR_EL1 register.
 */
static int __init early_cachepolicy(char *p)
{
        int i;
        u64 tmp;

        for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
                int len = strlen(cache_policies[i].policy);

                if (memcmp(p, cache_policies[i].policy, len) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(cache_policies)) {
                pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
                return 0;
        }

        flush_cache_all();

        /*
         * Modify MT_NORMAL attributes in MAIR_EL1.
         */
        asm volatile(
        "       mrs     %0, mair_el1\n"
        "       bfi     %0, %1, %2, #8\n"
        "       msr     mair_el1, %0\n"
        "       isb\n"
        : "=&r" (tmp)
        : "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));

        /*
         * Modify TCR PTW cacheability attributes.
         */
        asm volatile(
        "       mrs     %0, tcr_el1\n"
        "       bic     %0, %0, %2\n"
        "       orr     %0, %0, %1\n"
        "       msr     tcr_el1, %0\n"
        "       isb\n"
        : "=&r" (tmp)
        : "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));

        flush_cache_all();

        return 0;
}
early_param("cachepolicy", early_cachepolicy);
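
/*
 * Selected on the kernel command line, e.g. "cachepolicy=uncached", to
 * help track down cache coherency problems. Only the MT_NORMAL byte of
 * MAIR_EL1 and the TCR page-table-walk cacheability bits are rewritten;
 * Device memory attributes are left untouched.
 */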

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_valid(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
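
/*
 * These rules govern mmap() of /dev/mem and similar interfaces: pfns
 * that are not RAM get Device-nGnRnE attributes, while RAM opened with
 * O_SYNC is mapped Normal non-cacheable (write-combine).
 */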

static void __init *early_alloc(unsigned long sz)
{
        void *ptr = __va(memblock_alloc(sz, sz));
        memset(ptr, 0, sz);
        return ptr;
}
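
/*
 * early_alloc() returns naturally aligned, zeroed memory from memblock.
 * map_mem() below temporarily lowers the memblock limit so that these
 * page table pages are always allocated from memory that the initial
 * kernel mapping already covers and can therefore be written directly.
 */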

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  pgprot_t prot)
{
        pte_t *pte;

        if (pmd_none(*pmd)) {
                pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
                __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
        }
        BUG_ON(pmd_bad(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                                  unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot)
{
        pmd_t *pmd;
        unsigned long next;

        /*
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_bad(*pud)) {
                pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
                pud_populate(mm, pud, pmd);
        }

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0) {
                        pmd_t old_pmd = *pmd;
                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
                        /*
                         * Check for previous table entries created during
                         * boot (__create_page_tables) and flush them.
                         */
                        if (!pmd_none(old_pmd))
                                flush_tlb_all();
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
                                       prot);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
}
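
/*
 * A section (block) mapping at the pmd level is only possible when the
 * virtual and physical addresses and the range are all aligned to
 * SECTION_SIZE, e.g. a 2MB-aligned, 2MB-sized chunk with 4K pages;
 * otherwise the range falls back to individual ptes.
 */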

static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
                                  unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot)
{
        pud_t *pud;
        unsigned long next;

        if (pgd_none(*pgd)) {
                pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
                pgd_populate(mm, pgd, pud);
        }
        BUG_ON(pgd_bad(*pgd));

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
                if ((PAGE_SHIFT == 12) &&
                    ((addr | next | phys) & ~PUD_MASK) == 0) {
                        pud_t old_pud = *pud;
                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));

                        /*
                         * If we have an old value for a pud, it will
                         * be pointing to a pmd table that we no longer
                         * need (from swapper_pg_dir).
                         *
                         * Look up the old pmd table and free it.
                         */
                        if (!pud_none(old_pud)) {
                                phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
                                memblock_free(table, PAGE_SIZE);
                                flush_tlb_all();
                        }
                } else {
                        alloc_init_pmd(mm, pud, addr, next, phys, prot);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
}
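
/*
 * Similarly, a pud block mapping needs PUD_SIZE (1GB with the 4K
 * granule) alignment of both addresses. When such a block replaces a
 * pmd table inherited from swapper_pg_dir, the now-unused table page
 * can be handed back to memblock.
 */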

/*
 * Create the page directory entries and any necessary page tables for
 * the given virtual/physical address range.
 */
static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
                                    phys_addr_t phys, unsigned long virt,
                                    phys_addr_t size, pgprot_t prot)
{
        unsigned long addr, length, end, next;

        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(mm, pgd, addr, next, phys, prot);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
}

static void __init create_mapping(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
                         size, PAGE_KERNEL_EXEC);
}
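
/*
 * create_mapping() is the early, kernel-only helper: it refuses
 * addresses below VMALLOC_START and hard-codes PAGE_KERNEL_EXEC, the
 * permissions used for the linear mapping of RAM.
 */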

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
{
        __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot);
}
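
/*
 * Unlike create_mapping(), this variant takes an explicit mm and
 * protection bits, so callers can populate page tables other than
 * swapper's (the EFI runtime services code, for example, builds its
 * own mappings this way).
 */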

static void __init map_mem(void)
{
        struct memblock_region *reg;
        phys_addr_t limit;

        /*
         * Temporarily limit the memblock range. We need to do this as
         * create_mapping requires puds, pmds and ptes to be allocated from
         * memory addressable from the initial direct kernel mapping.
         *
         * The initial direct kernel mapping, located at swapper_pg_dir, gives
         * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
         * PHYS_OFFSET (which must be aligned to 2MB as per
         * Documentation/arm64/booting.txt).
         */
        if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
                limit = PHYS_OFFSET + PMD_SIZE;
        else
                limit = PHYS_OFFSET + PUD_SIZE;
        memblock_set_current_limit(limit);

        /* map all the memory banks */
        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;

                if (start >= end)
                        break;

#ifndef CONFIG_ARM64_64K_PAGES
                /*
                 * For the first memory bank align the start address and
                 * current memblock limit to prevent create_mapping() from
                 * allocating pte page tables from unmapped memory.
                 * When 64K pages are enabled, the pte page table for the
                 * first PGDIR_SIZE is already present in swapper_pg_dir.
                 */
                if (start < limit)
                        start = ALIGN(start, PMD_SIZE);
                if (end < limit) {
                        limit = end & PMD_MASK;
                        memblock_set_current_limit(limit);
                }
#endif

                create_mapping(start, __phys_to_virt(start), end - start);
        }

        /* Limit no longer required. */
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
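
/*
 * Concretely: with 4K pages the initial tables cover PUD_SIZE (1GB)
 * from PHYS_OFFSET, with 64K pages PMD_SIZE (512MB). Until the linear
 * map has grown past that window, every new page table page must come
 * from within it.
 */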

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
        void *zero_page;

        map_mem();

        /*
         * Finally flush the caches and tlb to ensure that we're in a
         * consistent state.
         */
        flush_cache_all();
        flush_tlb_all();

        /* allocate the zero page. */
        zero_page = early_alloc(PAGE_SIZE);

        bootmem_init();

        empty_zero_page = virt_to_page(zero_page);

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
        flush_tlb_all();
}

/*
 * Enable the identity mapping so that the MMU can be disabled.
 */
void setup_mm_for_reboot(void)
{
        cpu_switch_mm(idmap_pg_dir, &init_mm);
        flush_tlb_all();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_sect(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_sect(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}
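
/*
 * The shift test above exploits the fact that valid kernel (TTBR1)
 * addresses have all bits from VA_BITS upwards set, so the arithmetic
 * right shift yields -1; any other pointer fails immediately. Section
 * entries are checked before descending because they have no lower
 * table levels to walk.
 */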

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        return vmemmap_populate_basepages(start, end, node);
}
#else   /* !CONFIG_ARM64_64K_PAGES */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        do {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
                } else {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
                }
        } while (addr = next, addr != end);

        return 0;
}
#endif  /* CONFIG_ARM64_64K_PAGES */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif  /* CONFIG_SPARSEMEM_VMEMMAP */

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_ARM64_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif
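
/*
 * Static bootstrap tables backing the fixmap: one page per level is
 * enough because the whole fixmap region is designed to fit within a
 * single entry at each higher level. Levels folded away by the
 * configured CONFIG_ARM64_PGTABLE_LEVELS need no table at all.
 */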

static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);

        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

        return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pud = fixmap_pud(addr);

        BUG_ON(pud_none(*pud) || pud_bad(*pud));

        return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        pmd_t *pmd = fixmap_pmd(addr);

        BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

        return pte_offset_kernel(pmd, addr);
}

void __init early_fixmap_init(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = FIXADDR_START;

        pgd = pgd_offset_k(addr);
        pgd_populate(&init_mm, pgd, bm_pud);
        pud = pud_offset(pgd, addr);
        pud_populate(&init_mm, pud, bm_pmd);
        pmd = pmd_offset(pud, addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmd %p != %p, %p\n",
                        pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
        }
}
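
/*
 * early_fixmap_init() only wires the bm_* tables into swapper's pgd;
 * it installs no actual translations. It must run before anything
 * relies on the fixmap (early console, early ioremap), which then
 * maps individual slots via __set_fixmap().
 */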

void __set_fixmap(enum fixed_addresses idx,
                  phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        pte = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, pte);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
}
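
/*
 * Callers map or unmap one fixmap slot at a time, along the lines of
 * (FIX_EARLYCON_MEM_BASE here is purely illustrative; the available
 * slots are listed in asm/fixmap.h):
 *
 *      __set_fixmap(FIX_EARLYCON_MEM_BASE, paddr, FIXMAP_PAGE_IO);
 *
 * Passing an empty pgprot clears the slot and invalidates the TLB.
 */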