arm64: remove broken cachepolicy code
arch/arm64/mm/mmu.c
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_valid(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
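
/*
 * phys_mem_access_prot() above decides the attributes used when user space
 * mmap()s physical memory (e.g. via /dev/mem): pfns that are not valid RAM
 * are mapped as device memory via pgprot_noncached(), and O_SYNC mappings
 * of RAM are downgraded to write-combining. The exact memory types chosen
 * depend on the pgprot_*() definitions in <asm/pgtable.h> for this kernel
 * version.
 */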

static void __init *early_alloc(unsigned long sz)
{
        void *ptr = __va(memblock_alloc(sz, sz));
        memset(ptr, 0, sz);
        return ptr;
}

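/*
 * early_alloc() hands out zeroed, naturally aligned blocks straight from
 * memblock. It is only safe while the regions being allocated from are
 * covered by the initial kernel mapping; map_mem() below arranges this by
 * temporarily capping the memblock allocation limit.
 */
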
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  pgprot_t prot)
{
        pte_t *pte;

        if (pmd_none(*pmd)) {
                pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
                __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
        }
        BUG_ON(pmd_bad(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

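/*
 * alloc_init_pte() above fills in the last level of the walk: it allocates
 * a pte table on demand, hooks it into the pmd, then installs one pte per
 * PAGE_SIZE step of [addr, end).
 */
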
static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                                  unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot)
{
        pmd_t *pmd;
        unsigned long next;

        /*
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_bad(*pud)) {
                pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
                pud_populate(mm, pud, pmd);
        }

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0) {
                        pmd_t old_pmd = *pmd;
                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
                        /*
                         * Check for previous table entries created during
                         * boot (__create_page_tables) and flush them.
                         */
                        if (!pmd_none(old_pmd))
                                flush_tlb_all();
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
                                       prot);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
}

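/*
 * At the pmd level a block ("section") mapping is used whenever the
 * virtual range and the physical address are all SECTION_SIZE aligned
 * (2MB with 4K pages, 512MB with 64K pages), avoiding a pte table
 * entirely. The flush_tlb_all() above is needed because
 * __create_page_tables may already have inserted a table entry for this
 * slot during boot.
 */
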
static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
                                  unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot)
{
        pud_t *pud;
        unsigned long next;

        if (pgd_none(*pgd)) {
                pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
                pgd_populate(mm, pgd, pud);
        }
        BUG_ON(pgd_bad(*pgd));

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
                if ((PAGE_SHIFT == 12) &&
                    ((addr | next | phys) & ~PUD_MASK) == 0) {
                        pud_t old_pud = *pud;
                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));

                        /*
                         * If we have an old value for a pud, it will
                         * be pointing to a pmd table that we no longer
                         * need (from swapper_pg_dir).
                         *
                         * Look up the old pmd table and free it.
                         */
                        if (!pud_none(old_pud)) {
                                phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
                                memblock_free(table, PAGE_SIZE);
                                flush_tlb_all();
                        }
                } else {
                        alloc_init_pmd(mm, pud, addr, next, phys, prot);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
}

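/*
 * Similarly, with the 4K granule a whole PUD_SIZE (1GB) block can be
 * installed at the pud level. When that replaces a pmd table inherited
 * from swapper_pg_dir, the now-unused table page is returned to memblock
 * and stale TLB entries are flushed.
 */
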
/*
 * Create the page directory entries and any necessary page tables for the
 * given virtual to physical mapping.
 */
static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
                                    phys_addr_t phys, unsigned long virt,
                                    phys_addr_t size, pgprot_t prot)
{
        unsigned long addr, length, end, next;

        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(mm, pgd, addr, next, phys, prot);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
}

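/*
 * __create_mapping() is the top of the descent: it walks the pgd slots
 * covering [virt, virt + size) and delegates each PGDIR_SIZE chunk to
 * alloc_init_pud(), which in turn descends to pmds and ptes as needed.
 */
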
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
                         size, PAGE_KERNEL_EXEC);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
{
        __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot);
}

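/*
 * create_pgd_mapping() performs the same walk against a caller-supplied
 * mm/pgd rather than swapper's, allowing private sets of page tables (for
 * instance, the EFI runtime services mapping code) to be populated with
 * arbitrary attributes.
 */
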
static void __init map_mem(void)
{
        struct memblock_region *reg;
        phys_addr_t limit;

        /*
         * Temporarily limit the memblock range. We need to do this as
         * create_mapping requires puds, pmds and ptes to be allocated from
         * memory addressable from the initial direct kernel mapping.
         *
         * The initial direct kernel mapping, located at swapper_pg_dir, gives
         * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
         * PHYS_OFFSET (which must be aligned to 2MB as per
         * Documentation/arm64/booting.txt).
         */
        if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
                limit = PHYS_OFFSET + PMD_SIZE;
        else
                limit = PHYS_OFFSET + PUD_SIZE;
        memblock_set_current_limit(limit);

        /* map all the memory banks */
        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;

                if (start >= end)
                        break;

#ifndef CONFIG_ARM64_64K_PAGES
                /*
                 * For the first memory bank align the start address and
                 * current memblock limit to prevent create_mapping() from
                 * allocating pte page tables from unmapped memory.
                 * When 64K pages are enabled, the pte page table for the
                 * first PGDIR_SIZE is already present in swapper_pg_dir.
                 */
                if (start < limit)
                        start = ALIGN(start, PMD_SIZE);
                if (end < limit) {
                        limit = end & PMD_MASK;
                        memblock_set_current_limit(limit);
                }
#endif

                create_mapping(start, __phys_to_virt(start), end - start);
        }

        /* Limit no longer required. */
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

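/*
 * Note the bootstrapping subtlety in map_mem(): page-table pages for new
 * mappings must themselves be reachable through the linear mapping, so the
 * memblock limit is only raised to MEMBLOCK_ALLOC_ANYWHERE once all of
 * memory has been mapped.
 */
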
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
        void *zero_page;

        map_mem();

        /*
         * Finally flush the caches and tlb to ensure that we're in a
         * consistent state.
         */
        flush_cache_all();
        flush_tlb_all();

        /* allocate the zero page. */
        zero_page = early_alloc(PAGE_SIZE);

        bootmem_init();

        empty_zero_page = virt_to_page(zero_page);

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
        flush_tlb_all();
}

/*
 * Enable the identity mapping so that the MMU can be disabled.
 */
void setup_mm_for_reboot(void)
{
        cpu_switch_mm(idmap_pg_dir, &init_mm);
        flush_tlb_all();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_sect(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_sect(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}
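
/*
 * The initial check in kern_addr_valid() relies on arithmetic shift:
 * kernel addresses have bits [63:VA_BITS] all set, so (long)addr >> VA_BITS
 * sign-extends to -1 exactly for them. For example, with VA_BITS == 39 the
 * kernel range starts at 0xffffff8000000000, and
 * (long)0xffffff8000000000 >> 39 == -1, while any user address fails the
 * comparison and is rejected.
 */
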
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        return vmemmap_populate_basepages(start, end, node);
}
#else   /* !CONFIG_ARM64_64K_PAGES */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        do {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
                } else
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
        } while (addr = next, addr != end);

        return 0;
}
#endif  /* CONFIG_ARM64_64K_PAGES */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif  /* CONFIG_SPARSEMEM_VMEMMAP */
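
/*
 * The split above reflects the granule size: with 4K pages the vmemmap can
 * be built from 2MB pmd sections backed by vmemmap_alloc_block_buf(),
 * whereas with 64K pages a pmd "section" would be far too large, so plain
 * base pages are used instead via vmemmap_populate_basepages().
 */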

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_ARM64_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);

        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

        return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pud = fixmap_pud(addr);

        BUG_ON(pud_none(*pud) || pud_bad(*pud));

        return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        pmd_t *pmd = fixmap_pmd(addr);

        BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

        return pte_offset_kernel(pmd, addr);
}

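/*
 * The fixmap tables above (bm_pud/bm_pmd/bm_pte) live in .bss rather than
 * being allocated dynamically because early_fixmap_init() runs very early
 * in boot, before paging_init() has set up the linear mapping; only the
 * levels that the configured CONFIG_ARM64_PGTABLE_LEVELS actually needs
 * are instantiated.
 */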
void __init early_fixmap_init(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = FIXADDR_START;

        pgd = pgd_offset_k(addr);
        pgd_populate(&init_mm, pgd, bm_pud);
        pud = pud_offset(pgd, addr);
        pud_populate(&init_mm, pud, bm_pmd);
        pmd = pmd_offset(pud, addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmd %p != %p, %p\n",
                        pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
        }
}

void __set_fixmap(enum fixed_addresses idx,
                  phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        pte = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, pte);
                flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
        }
}
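
/*
 * __set_fixmap() (re)programs a single pte slot in the statically
 * allocated fixmap tables: a non-empty pgprot installs the mapping, while
 * a zero pgprot clears the pte and invalidates the TLB for that page.
 */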