arch/arm64/mm/mmu.c
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

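/*
 * A boot-time cache policy: its name, the MAIR_EL1 attribute byte used
 * for MT_NORMAL memory, and the matching TCR_EL1 page-table-walk
 * cacheability bits.
 */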
struct cachepolicy {
	const char	policy[16];
	u64		mair;
	u64		tcr;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy	= "uncached",
		.mair	= 0x44,			/* inner, outer non-cacheable */
		.tcr	= TCR_IRGN_NC | TCR_ORGN_NC,
	}, {
		.policy	= "writethrough",
		.mair	= 0xaa,			/* inner, outer write-through, read-allocate */
		.tcr	= TCR_IRGN_WT | TCR_ORGN_WT,
	}, {
		.policy	= "writeback",
		.mair	= 0xee,			/* inner, outer write-back, read-allocate */
		.tcr	= TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
	}
};

/*
 * This is useful for identifying cache coherency problems by allowing the
 * cache, or the cache and write buffer, to be turned off. It changes the
 * Normal memory caching attributes in the MAIR_EL1 register. The policy is
 * selected on the kernel command line, e.g. "cachepolicy=uncached".
 */
static int __init early_cachepolicy(char *p)
{
	int i;
	u64 tmp;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0)
			break;
	}
	if (i == ARRAY_SIZE(cache_policies)) {
		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
		return 0;
	}

	flush_cache_all();

	/*
	 * Modify MT_NORMAL attributes in MAIR_EL1.
	 */
	asm volatile(
	"	mrs	%0, mair_el1\n"
	"	bfi	%0, %1, #%2, #8\n"
	"	msr	mair_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));

	/*
	 * Modify TCR PTW cacheability attributes.
	 */
	asm volatile(
	"	mrs	%0, tcr_el1\n"
	"	bic	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	msr	tcr_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));

	flush_cache_all();

	return 0;
}
early_param("cachepolicy", early_cachepolicy);

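/*
 * Choose the pgprot for a user mapping of physical memory (e.g. /dev/mem):
 * non-RAM pfns are mapped as Device memory, RAM opened with O_SYNC is
 * mapped write-combine, and everything else keeps the default (cacheable)
 * attributes.
 */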
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

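/*
 * Grab a zeroed, naturally aligned block from memblock for early page
 * table construction, before the normal page allocator is up.
 */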
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

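/*
 * Populate the ptes for [addr, end) under *pmd, allocating the pte table
 * first if the pmd is empty. The mapping starts at the given pfn.
 */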
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

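/*
 * Populate the pmds for [addr, end) under *pud, using section (block)
 * mappings where the virtual and physical addresses are both section
 * aligned, and falling back to ptes otherwise.
 */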
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  int map_io)
{
	pmd_t *pmd;
	unsigned long next;
	pmdval_t prot_sect;
	pgprot_t prot_pte;

	if (map_io) {
		prot_sect = PROT_SECT_DEVICE_nGnRE;
		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
	} else {
		prot_sect = PROT_SECT_NORMAL_EXEC;
		prot_pte = PAGE_KERNEL_EXEC;
	}

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_bad(*pud)) {
		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys | prot_sect));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd))
				flush_tlb_all();
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot_pte);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

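/*
 * Populate the puds for [addr, end) under *pgd, using 1GB block mappings
 * where the addresses allow it (4K granule only) and pmds otherwise.
 */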
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, unsigned long phys,
				  int map_io)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block.
		 */
		if (!map_io && (PAGE_SHIFT == 12) &&
		    ((addr | next | phys) & ~PUD_MASK) == 0) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
				memblock_free(table, PAGE_SIZE);
				flush_tlb_all();
			}
		} else {
			alloc_init_pmd(pud, addr, next, phys, map_io);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping of 'size' bytes from physical address 'phys' to virtual address
 * 'virt'.
 */
static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
				    unsigned long virt, phys_addr_t size,
				    int map_io)
{
	unsigned long addr, length, end, next;

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, map_io);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

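/*
 * Map a physical range at a kernel virtual address as Normal memory,
 * refusing virtual addresses below VMALLOC_START.
 */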
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0);
}

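/*
 * Install an identity (virtual == physical) mapping in idmap_pg_dir,
 * bailing out if the range lies beyond the pgds idmap_pg_dir provides.
 */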
void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
{
	if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
		pr_warn("BUG: not creating id mapping for %pa\n", &addr);
		return;
	}
	__create_mapping(&idmap_pg_dir[pgd_index(addr)],
			 addr, addr, size, map_io);
}

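/*
 * Create the linear mapping for all memory regions registered with
 * memblock.
 */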
static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir,
	 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
	 * aligned to 2MB as per Documentation/arm64/booting.txt).
	 */
	limit = PHYS_OFFSET + PGDIR_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory.
		 * When 64K pages are enabled, the pte page table for the
		 * first PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif

		create_mapping(start, __phys_to_virt(start), end - start);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	map_mem();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state.
	 */
	flush_cache_all();
	flush_tlb_all();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
}

/*
 * Enable the identity mapping to allow the MMU to be disabled.
 */
void setup_mm_for_reboot(void)
{
	cpu_switch_mm(idmap_pg_dir, &init_mm);
	flush_tlb_all();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
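/*
 * With 64K pages, back the vmemmap with base pages rather than section
 * mappings.
 */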
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
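/*
 * With 4K pages, back the vmemmap with PMD-sized (2MB) section mappings,
 * allocated from node-local memory where possible.
 */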
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */
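/*
 * No-op: the vmemmap is never unmapped (memory hot-remove is not
 * supported).
 */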
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */