[ARM] xsc3: add highmem support to L2 cache handling code
arch/arm/mm/mmu.c
/*
 * linux/arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>

#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/highmem.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED        0
#define CPOLICY_BUFFERED        1
#define CPOLICY_WRITETHROUGH    2
#define CPOLICY_WRITEBACK       3
#define CPOLICY_WRITEALLOC      4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
        const char      policy[16];
        unsigned int    cr_mask;
        unsigned int    pmd;
        unsigned int    pte;
};

static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
                .pte            = L_PTE_MT_UNCACHED,
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
                .pte            = L_PTE_MT_BUFFERABLE,
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
                .pte            = L_PTE_MT_WRITETHROUGH,
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
                .pte            = L_PTE_MT_WRITEBACK,
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
                .pte            = L_PTE_MT_WRITEALLOC,
        }
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
                int len = strlen(cache_policies[i].policy);

                if (memcmp(*p, cache_policies[i].policy, len) == 0) {
                        cachepolicy = i;
                        cr_alignment &= ~cache_policies[i].cr_mask;
                        cr_no_alignment &= ~cache_policies[i].cr_mask;
                        *p += len;
                        break;
                }
        }
        if (i == ARRAY_SIZE(cache_policies))
                printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
        if (cpu_architecture() >= CPU_ARCH_ARMv6) {
                printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
                cachepolicy = CPOLICY_WRITEBACK;
        }
        flush_cache_all();
        set_cr(cr_alignment);
}
__early_param("cachepolicy=", early_cachepolicy);
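
/*
 * Example (illustrative): a kernel command line containing
 * "cachepolicy=writethrough" picks the matching entry in cache_policies[]
 * above; on ARMv6 and later, early_cachepolicy() forces the policy back
 * to writeback.
 */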

static void __init early_nocache(char **__unused)
{
        char *p = "buffered";
        printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}
__early_param("nocache", early_nocache);

static void __init early_nowrite(char **__unused)
{
        char *p = "uncached";
        printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}
__early_param("nowb", early_nowrite);

static void __init early_ecc(char **p)
{
        if (memcmp(*p, "on", 2) == 0) {
                ecc_mask = PMD_PROTECTION;
                *p += 2;
        } else if (memcmp(*p, "off", 3) == 0) {
                ecc_mask = 0;
                *p += 3;
        }
}
__early_param("ecc=", early_ecc);
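
/*
 * Example (illustrative): "ecc=on" sets ecc_mask to PMD_PROTECTION, which
 * build_mem_type_table() later ORs into the level 1 entries for kernel
 * memory and the vector pages; "ecc=off" clears it again.
 */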

static int __init noalign_setup(char *__unused)
{
        cr_alignment &= ~CR_A;
        cr_no_alignment &= ~CR_A;
        set_cr(cr_alignment);
        return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
        unsigned long flags;

        mask &= ~CR_A;

        set &= mask;

        local_irq_save(flags);

        cr_no_alignment = (cr_no_alignment & ~mask) | set;
        cr_alignment = (cr_alignment & ~mask) | set;

        set_cr((get_cr() & ~mask) | set);

        local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE         L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE        PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
        [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
                                  L_PTE_SHARED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_S,
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE,
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_CACHED] = {    /* ioremap_cached */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_WB,
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_WC] = {        /* ioremap_wc */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE,
                .domain         = DOMAIN_IO,
        },
        [MT_UNCACHED] = {
                .prot_pte       = PROT_PTE_DEVICE,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain         = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
                .prot_sect      = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain         = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
                .prot_sect      = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
                .domain         = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
                .prot_pte       = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                  L_PTE_EXEC,
                .prot_l1        = PMD_TYPE_TABLE,
                .domain         = DOMAIN_USER,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte       = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                  L_PTE_USER | L_PTE_EXEC,
                .prot_l1        = PMD_TYPE_TABLE,
                .domain         = DOMAIN_USER,
        },
        [MT_MEMORY] = {
                .prot_sect      = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain         = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
                .prot_sect      = PMD_TYPE_SECT,
                .domain         = DOMAIN_KERNEL,
        },
};

const struct mem_type *get_mem_type(unsigned int type)
{
        return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
        unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
        int cpu_arch = cpu_architecture();
        int i;

        if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
                if (cachepolicy > CPOLICY_BUFFERED)
                        cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
                if (cachepolicy > CPOLICY_WRITETHROUGH)
                        cachepolicy = CPOLICY_WRITETHROUGH;
#endif
        }
        if (cpu_arch < CPU_ARCH_ARMv5) {
                if (cachepolicy >= CPOLICY_WRITEALLOC)
                        cachepolicy = CPOLICY_WRITEBACK;
                ecc_mask = 0;
        }
#ifdef CONFIG_SMP
        cachepolicy = CPOLICY_WRITEALLOC;
#endif

        /*
         * Strip out features not present on earlier architectures.
         * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
         * without extended page tables don't have the 'Shared' bit.
         */
        if (cpu_arch < CPU_ARCH_ARMv5)
                for (i = 0; i < ARRAY_SIZE(mem_types); i++)
                        mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
        if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
                for (i = 0; i < ARRAY_SIZE(mem_types); i++)
                        mem_types[i].prot_sect &= ~PMD_SECT_S;

        /*
         * On ARMv5 and lower, bit 4 must be set for page tables (was: cache
         * "update-able on write" bit on ARM610).  However, Xscale and
         * Xscale3 require this bit to be cleared.
         */
        if (cpu_is_xscale() || cpu_is_xsc3()) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        mem_types[i].prot_sect &= ~PMD_BIT4;
                        mem_types[i].prot_l1 &= ~PMD_BIT4;
                }
        } else if (cpu_arch < CPU_ARCH_ARMv6) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
                        if (mem_types[i].prot_sect)
                                mem_types[i].prot_sect |= PMD_BIT4;
                }
        }

        /*
         * Mark the device areas according to the CPU/architecture.
         */
        if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
                if (!cpu_is_xsc3()) {
                        /*
                         * Mark device regions on ARMv6+ as execute-never
                         * to prevent speculative instruction fetches.
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
                }
                if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
                        /*
                         * For ARMv7 with TEX remapping,
                         * - shared device is SXCB=1100
                         * - nonshared device is SXCB=0100
                         * - write combine device mem is SXCB=0001
                         * (Uncached Normal memory)
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
                } else if (cpu_is_xsc3()) {
                        /*
                         * For Xscale3,
                         * - shared device is TEXCB=00101
                         * - nonshared device is TEXCB=01000
                         * - write combine device mem is TEXCB=00100
                         * (Inner/Outer Uncacheable in xsc3 parlance)
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
                } else {
                        /*
                         * For ARMv6 and ARMv7 without TEX remapping,
                         * - shared device is TEXCB=00001
                         * - nonshared device is TEXCB=01000
                         * - write combine device mem is TEXCB=00100
                         * (Uncached Normal in ARMv6 parlance).
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
                }
        } else {
                /*
                 * On others, write combining is "Uncached/Buffered"
                 */
                mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
        }

        /*
         * Now deal with the memory-type mappings
         */
        cp = &cache_policies[cachepolicy];
        vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

#ifndef CONFIG_SMP
        /*
         * Only use write-through for non-SMP systems
         */
        if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
                vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
#endif

        /*
         * Enable CPU-specific coherency if supported.
         * (Only available on XSC3 at the moment.)
         */
        if (arch_is_coherent() && cpu_is_xsc3())
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;

        /*
         * ARMv6 and above have extended page tables.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
                /*
                 * Mark cache clean areas and XIP ROM read only
                 * from SVC mode and no access from userspace.
                 */
                mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

#ifdef CONFIG_SMP
                /*
                 * Mark memory with the "shared" attribute for SMP systems
                 */
                user_pgprot |= L_PTE_SHARED;
                kern_pgprot |= L_PTE_SHARED;
                vecs_pgprot |= L_PTE_SHARED;
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
        }

        for (i = 0; i < 16; i++) {
                unsigned long v = pgprot_val(protection_map[i]);
                protection_map[i] = __pgprot(v | user_pgprot);
        }

        mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
        mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

        pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
                                 L_PTE_DIRTY | L_PTE_WRITE |
                                 L_PTE_EXEC | kern_pgprot);

        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_ROM].prot_sect |= cp->pmd;

        switch (cp->pmd) {
        case PMD_SECT_WT:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
                break;
        case PMD_SECT_WB:
        case PMD_SECT_WBWA:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
                break;
        }
        printk("Memory policy: ECC %sabled, Data cache %s\n",
                ecc_mask ? "en" : "dis", cp->policy);

        for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                struct mem_type *t = &mem_types[i];
                if (t->prot_l1)
                        t->prot_l1 |= PMD_DOMAIN(t->domain);
                if (t->prot_sect)
                        t->prot_sect |= PMD_DOMAIN(t->domain);
        }
}

#define vectors_base()  (vectors_high() ? 0xffff0000 : 0)

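/*
 * Note on the PTE allocation below: it is twice PTRS_PER_PTE entries
 * because, on classic ARM 2-level paging, each such allocation is assumed
 * to hold the hardware L2 tables plus the Linux "shadow" copy that
 * follows them; pte_offset_kernel() then resolves to the Linux copy.
 */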
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  const struct mem_type *type)
{
        pte_t *pte;

        if (pmd_none(*pmd)) {
                pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
                __pmd_populate(pmd, __pa(pte) | type->prot_l1);
        }

        pte = pte_offset_kernel(pmd, addr);
        do {
                set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
                                      unsigned long end, unsigned long phys,
                                      const struct mem_type *type)
{
        pmd_t *pmd = pmd_offset(pgd, addr);

        /*
         * Try a section mapping - end, addr and phys must all be aligned
         * to a section boundary.  Note that PMDs refer to the individual
         * L1 entries, whereas PGDs refer to a group of L1 entries making
         * up one logical pointer to an L2 table.
         */
        if (((addr | end | phys) & ~SECTION_MASK) == 0) {
                pmd_t *p = pmd;

                if (addr & SECTION_SIZE)
                        pmd++;

                do {
                        *pmd = __pmd(phys | type->prot_sect);
                        phys += SECTION_SIZE;
                } while (pmd++, addr += SECTION_SIZE, addr != end);

                flush_pmd_entry(p);
        } else {
                /*
                 * No need to loop; pte's aren't interested in the
                 * individual L1 entries.
                 */
                alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
        }
}

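/*
 * create_36bit_mapping() uses ARMv6/XSC3 supersections: sixteen
 * consecutive L1 entries describe one 16MB region, and bits [35:32] of
 * the physical address are carried in bits [23:20] of each entry.
 * Illustrative example: a 36-bit physical address of 0x234000000 gives
 * md->pfn = 0x234000, so (md->pfn >> 20) & 0xF = 0x2 lands in PMD
 * bits [23:20].
 */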
static void __init create_36bit_mapping(struct map_desc *md,
                                        const struct mem_type *type)
{
        unsigned long phys, addr, length, end;
        pgd_t *pgd;

        addr = md->virtual;
        phys = (unsigned long)__pfn_to_phys(md->pfn);
        length = PAGE_ALIGN(md->length);

        if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
                printk(KERN_ERR "MM: CPU does not support supersection "
                       "mapping for 0x%08llx at 0x%08lx\n",
                       __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        /* N.B. ARMv6 supersections are only defined to work with domain 0.
         *      Since domain assignments can in fact be arbitrary, the
         *      'domain == 0' check below is required to ensure that ARMv6
         *      supersections are only allocated for domain 0 regardless
         *      of the actual domain assignments in use.
         */
        if (type->domain) {
                printk(KERN_ERR "MM: invalid domain in supersection "
                       "mapping for 0x%08llx at 0x%08lx\n",
                       __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
                printk(KERN_ERR "MM: cannot create mapping for "
                       "0x%08llx at 0x%08lx invalid alignment\n",
                       __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        /*
         * Shift bits [35:32] of address into bits [23:20] of PMD
         * (See ARMv6 spec).
         */
        phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

        pgd = pgd_offset_k(addr);
        end = addr + length;
        do {
                pmd_t *pmd = pmd_offset(pgd, addr);
                int i;

                for (i = 0; i < 16; i++)
                        *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

                addr += SUPERSECTION_SIZE;
                phys += SUPERSECTION_SIZE;
                pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
        } while (addr != end);
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
        unsigned long phys, addr, length, end;
        const struct mem_type *type;
        pgd_t *pgd;

        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                printk(KERN_WARNING "BUG: not creating mapping for "
                       "0x%08llx at 0x%08lx in user region\n",
                       __pfn_to_phys((u64)md->pfn), md->virtual);
                return;
        }

        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
            md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
                printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
                       "overlaps vmalloc space\n",
                       __pfn_to_phys((u64)md->pfn), md->virtual);
        }

        type = &mem_types[md->type];

        /*
         * Catch 36-bit addresses
         */
        if (md->pfn >= 0x100000) {
                create_36bit_mapping(md, type);
                return;
        }

        addr = md->virtual & PAGE_MASK;
        phys = (unsigned long)__pfn_to_phys(md->pfn);
        length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

        if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
                printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
                       "be mapped using pages, ignoring.\n",
                       __pfn_to_phys(md->pfn), addr);
                return;
        }

        pgd = pgd_offset_k(addr);
        end = addr + length;
        do {
                unsigned long next = pgd_addr_end(addr, end);

                alloc_init_section(pgd, addr, next, phys, type);

                phys += next - addr;
                addr = next;
        } while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                create_mapping(io_desc + i);
}

static unsigned long __initdata vmalloc_reserve = SZ_128M;

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes.  This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static void __init early_vmalloc(char **arg)
{
        vmalloc_reserve = memparse(*arg, arg);

        if (vmalloc_reserve < SZ_16M) {
                vmalloc_reserve = SZ_16M;
                printk(KERN_WARNING
                        "vmalloc area too small, limiting to %luMB\n",
                        vmalloc_reserve >> 20);
        }

        if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
                vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
                printk(KERN_WARNING
                        "vmalloc area is too big, limiting to %luMB\n",
                        vmalloc_reserve >> 20);
        }
}
__early_param("vmalloc=", early_vmalloc);
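
/*
 * Example (illustrative): booting with "vmalloc=256M" reserves 256MB for
 * the vmalloc area instead of the default 128MB, clamped by the checks
 * above (at least 16MB, and leaving at least 32MB of lowmem).
 */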

#define VMALLOC_MIN     (void *)(VMALLOC_END - vmalloc_reserve)

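/*
 * Clip the memory banks so that lowmem never extends past VMALLOC_MIN.
 * With CONFIG_HIGHMEM, the part of a bank that overflows is split off
 * into a new bank (to become highmem); without it, offending banks are
 * truncated or ignored outright.
 */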
static void __init sanity_check_meminfo(void)
{
        int i, j;

        for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
                struct membank *bank = &meminfo.bank[j];
                *bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
                /*
                 * Split those memory banks which are partially overlapping
                 * the vmalloc area, greatly simplifying things later.
                 */
                if (__va(bank->start) < VMALLOC_MIN &&
                    bank->size > VMALLOC_MIN - __va(bank->start)) {
                        if (meminfo.nr_banks >= NR_BANKS) {
                                printk(KERN_CRIT "NR_BANKS too low, "
                                       "ignoring high memory\n");
                        } else {
                                memmove(bank + 1, bank,
                                        (meminfo.nr_banks - i) * sizeof(*bank));
                                meminfo.nr_banks++;
                                i++;
                                bank[1].size -= VMALLOC_MIN - __va(bank->start);
                                bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
                                j++;
                        }
                        bank->size = VMALLOC_MIN - __va(bank->start);
                }
#else
                /*
                 * Check whether this memory bank would entirely overlap
                 * the vmalloc area.
                 */
                if (__va(bank->start) >= VMALLOC_MIN ||
                    __va(bank->start) < PAGE_OFFSET) {
                        printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
                               "(vmalloc region overlap).\n",
                               bank->start, bank->start + bank->size - 1);
                        continue;
                }

                /*
                 * Check whether this memory bank would partially overlap
                 * the vmalloc area.
                 */
                if (__va(bank->start + bank->size) > VMALLOC_MIN ||
                    __va(bank->start + bank->size) < __va(bank->start)) {
                        unsigned long newsize = VMALLOC_MIN - __va(bank->start);
                        printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
                               "to -%.8lx (vmalloc region overlap).\n",
                               bank->start, bank->start + bank->size - 1,
                               bank->start + newsize - 1);
                        bank->size = newsize;
                }
#endif
                j++;
        }
        meminfo.nr_banks = j;
}

static inline void prepare_page_table(void)
{
        unsigned long addr;

        /*
         * Clear out all the mappings below the kernel image.
         */
        for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
        /* The XIP kernel is mapped in the module area -- skip over it */
        addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
        for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

        /*
         * Clear out all the kernel space mappings, except for the first
         * memory bank, up to the end of the vmalloc region.
         */
        for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
             addr < VMALLOC_END; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the various regions of node 0
 */
void __init reserve_node_zero(pg_data_t *pgdat)
{
        unsigned long res_size = 0;

        /*
         * Register the kernel text and data with bootmem.
         * Note that this can only be in node 0.
         */
#ifdef CONFIG_XIP_KERNEL
        reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
                        BOOTMEM_DEFAULT);
#else
        reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
                        BOOTMEM_DEFAULT);
#endif

        /*
         * Reserve the page tables.  These are already in use,
         * and can only be in node 0.
         */
        reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
                        PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);

        /*
         * Hmm... This should go elsewhere, but we really really need to
         * stop things allocating the low memory; ideally we need a better
         * implementation of GFP_DMA which does not assume that DMA-able
         * memory starts at zero.
         */
        if (machine_is_integrator() || machine_is_cintegrator())
                res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

        /*
         * These should likewise go elsewhere.  They pre-reserve the
         * screen memory region at the start of main system memory.
         */
        if (machine_is_edb7211())
                res_size = 0x00020000;
        if (machine_is_p720t())
                res_size = 0x00014000;

        /* H1940 and RX3715 need to reserve this for suspend */

        if (machine_is_h1940() || machine_is_rx3715()) {
                reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
                                BOOTMEM_DEFAULT);
                reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
                                BOOTMEM_DEFAULT);
        }

#ifdef CONFIG_SA1111
        /*
         * Because of the SA1111 DMA bug, we want to preserve our
         * precious DMA-able memory...
         */
        res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
        if (res_size)
                reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
                                BOOTMEM_DEFAULT);
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
        struct map_desc map;
        unsigned long addr;
        void *vectors;

        /*
         * Allocate the vector page early.
         */
        vectors = alloc_bootmem_low_pages(PAGE_SIZE);

        for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

        /*
         * Map the kernel if it is XIP.
         * It is always first in the module area.
         */
#ifdef CONFIG_XIP_KERNEL
        map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
        map.virtual = MODULES_VADDR;
        map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
        map.type = MT_ROM;
        create_mapping(&map);
#endif

        /*
         * Map the cache flushing regions.
         */
#ifdef FLUSH_BASE
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
        map.virtual = FLUSH_BASE;
        map.length = SZ_1M;
        map.type = MT_CACHECLEAN;
        create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
        map.virtual = FLUSH_BASE_MINICACHE;
        map.length = SZ_1M;
        map.type = MT_MINICLEAN;
        create_mapping(&map);
#endif

        /*
         * Create a mapping for the machine vectors at the high-vectors
         * location (0xffff0000).  If we aren't using high-vectors, also
         * create a mapping at the low-vectors virtual address.
         */
        map.pfn = __phys_to_pfn(virt_to_phys(vectors));
        map.virtual = 0xffff0000;
        map.length = PAGE_SIZE;
        map.type = MT_HIGH_VECTORS;
        create_mapping(&map);

        if (!vectors_high()) {
                map.virtual = 0;
                map.type = MT_LOW_VECTORS;
                create_mapping(&map);
        }

        /*
         * Ask the machine support to map in the statically mapped devices.
         */
        if (mdesc->map_io)
                mdesc->map_io();

        /*
         * Finally flush the caches and tlb to ensure that we're in a
         * consistent state wrt the writebuffer.  This also ensures that
         * any write-allocated cache lines in the vector page are written
         * back.  After this point, we can start to touch devices again.
         */
        local_flush_tlb_all();
        flush_cache_all();
}

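/*
 * kmap_init() wires up the pkmap area used by kmap()/kunmap() on highmem
 * systems.  pkmap_page_table is pointed PTRS_PER_PTE entries past the
 * allocation, which is assumed here to be the Linux view of the PTE
 * table covering PKMAP_BASE.
 */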
static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
        pmd_t *pmd = pmd_off_k(PKMAP_BASE);
        pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
        BUG_ON(!pmd_none(*pmd) || !pte);
        __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
        pkmap_page_table = pte + PTRS_PER_PTE;
#endif
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
        void *zero_page;

        build_mem_type_table();
        sanity_check_meminfo();
        prepare_page_table();
        bootmem_init();
        devicemaps_init(mdesc);
        kmap_init();

        top_pmd = pmd_off_k(0xffff0000);

        /*
         * Allocate the zero page.  Note that this always succeeds and
         * returns a zeroed result.
         */
        zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
        empty_zero_page = virt_to_page(zero_page);
        flush_dcache_page(empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
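/*
 * Each iteration of the loop below fills one pgd slot, assumed to span
 * 2MB on classic ARM page tables (PGDIR_SHIFT == 21), with a pair of
 * 1MB section entries forming an identity (virtual == physical) mapping.
 */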
void setup_mm_for_reboot(char mode)
{
        unsigned long base_pmdval;
        pgd_t *pgd;
        int i;

        if (current->mm && current->mm->pgd)
                pgd = current->mm->pgd;
        else
                pgd = init_mm.pgd;

        base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
        if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
                base_pmdval |= PMD_BIT4;

        for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
                unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
                pmd_t *pmd;

                pmd = pmd_off(pgd, i << PGDIR_SHIFT);
                pmd[0] = __pmd(pmdval);
                pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
                flush_pmd_entry(pmd);
        }
}