/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);

pmd_t *top_pmd;

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= 0,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= PTE_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= PTE_CACHEABLE,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};
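
/*
 * For each policy, .pmd and .pte carry the cacheability encoding
 * written into section and page table entries respectively, while
 * .cr_mask lists CP15 control register bits that must be cleared
 * (CR_C = cache enable, CR_W = write buffer enable) for the policy
 * to take effect.
 */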

/*
 * These are useful for identifying cache coherency problems by
 * allowing the cache or the cache and write buffer to be turned
 * off.  (Note: the write buffer should not be on while the cache
 * is off.)
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}

static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);

#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}
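
/*
 * Note: ARM uses a two-level page table, so the pmd level is folded
 * into the pgd; pmd_offset() above simply resolves to the pgd entry
 * itself, each of which covers a 2MB region (a pair of 1MB hardware
 * sections).
 */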

/*
 * need to get a 16k page for level 1
 */
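/*
 * (The ARM level 1 translation table has 4096 word-sized entries,
 * one per 1MB of address space, so it occupies 16KB and needs the
 * order-2 page allocation below.)
 */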
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
	pte_lock_deinit(pte);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_off_k(virt);

	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}
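
/*
 * (Each pgd_t spans 2MB, so bit 20 of the virtual address picks
 * which of the two 1MB hardware section entries in the pair is
 * written.)
 */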

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

	for (i = 0; i < 16; i++) {
		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
	}
}
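
/*
 * (A supersection maps 16MB; the architecture requires the same
 * descriptor to be replicated in all 16 consecutive level 1 entries,
 * which is why the loop above writes sixteen 1MB half-pgdir entries.)
 */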

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(virt);
	pte_t *ptep;

	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}
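
/*
 * (Both tables come from the single bootmem allocation above: the
 * hardware table carries the descriptors the MMU walks, while the
 * shadow Linux table holds the extra status bits - young, dirty,
 * etc. - that the hardware format has no room for.)
 */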

struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};

static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
				PMD_SECT_TEX(1),
		.domain    = DOMAIN_IO,
	},
	[MT_NONSHARED_DEVICE] = {
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	}
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	/*
	 * Xscale must not have PMD bit 4 set for section mappings.
	 */
	if (cpu_is_xscale())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_BIT4;

	/*
	 * On ARMv5 and lower, excluding Xscale, bit 4 must be set
	 * for page tables.
	 */
	if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;

	cp = &cache_policies[cachepolicy];
	kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent()) {
		if (cpu_is_xsc3()) {
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_COHERENT;
		}
	}

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN, which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
		mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;

		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		/*
		 * Mark the device area as "shared device"
		 */
		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

		/*
		 * User pages need to be mapped with the ASID
		 * (iow, non-global)
		 */
		user_pgprot |= L_PTE_ASID;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
	}
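
	/*
	 * (On pre-ARMv6 CPUs, or when the extended page table format
	 * is disabled via the CP15 XP bit, the mem_types[] defaults
	 * above are used unmodified.)
	 */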

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = __pgprot(v);
	}
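
	/*
	 * (Each of the 16 entries in protection_map has its
	 * cacheability bits replaced by those of the selected cache
	 * policy, plus any user attributes such as the ASID.)
	 */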

	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;

	if (cpu_arch >= CPU_ARCH_ARMv5) {
#ifndef CONFIG_SMP
		/*
		 * Only use write-through for non-SMP systems
		 */
		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
#endif
	} else {
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	unsigned long off = (u32)__pfn_to_phys(md->pfn);

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	domain	  = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		if (domain) {
			printk(KERN_ERR "MM: invalid domain in supersection "
			       "mapping for 0x%08llx at 0x%08lx\n",
			       __pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}
		if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
			& ~SUPERSECTION_MASK) {
			printk(KERN_ERR "MM: cannot create mapping for "
			       "0x%08llx at 0x%08lx invalid alignment\n",
			       __pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}

		/*
		 * Shift bits [35:32] of address into bits [23:20] of PMD
		 * (See ARMv6 spec).
		 */
		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
	}
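
	/*
	 * Worked example: pfn 0x200000 is physical address
	 * 0x200000000, so bits [35:32] = 0x2 and the statement above
	 * ORs 0x00200000 into 'off', placing the extended base
	 * address in PMD bits [23:20].
	 */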

	virt   = md->virtual;
	off   -= virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx cannot "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), md->virtual);
		return;
	}

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
		&& domain == 0) {
		/*
		 * Align to supersection boundary if !high pages.
		 * High pages have already been checked for proper
		 * alignment above and they will fail the SUPERSECTION_MASK
		 * check because of the way the address is encoded into
		 * offset.
		 */
		if (md->pfn <= 0x100000) {
			while ((virt & ~SUPERSECTION_MASK ||
				(virt + off) & ~SUPERSECTION_MASK) &&
				length >= (PGDIR_SIZE / 2)) {
				alloc_init_section(virt, virt + off, prot_sect);

				virt   += (PGDIR_SIZE / 2);
				length -= (PGDIR_SIZE / 2);
			}
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt   += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
		}
	}

	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt   += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;
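
	/*
	 * Build a template descriptor for 1MB sections with kernel
	 * read/write access; pre-ARMv6 cores other than Xscale also
	 * need bit 4 set in section entries.
	 */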
	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}
653 | ||
1da177e4 LT |
654 | /* |
655 | * Create the architecture specific mappings | |
656 | */ | |
657 | void __init iotable_init(struct map_desc *io_desc, int nr) | |
658 | { | |
659 | int i; | |
660 | ||
661 | for (i = 0; i < nr; i++) | |
662 | create_mapping(io_desc + i); | |
663 | } |