/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry.  In non-PAE mode the middle layer
 * is folded, so this simply returns the pmd within the pgd entry.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		if (page_table != pte_offset_kernel(pmd, 0))
			BUG();

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a range of kernel virtual memory with new
 * bootmem page tables, wherever page tables are missing in the given
 * range.
 */

/*
 * NOTE: The page tables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		if (pgd_none(*pgd))
			one_md_table_init(pgd);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			if (pmd_none(*pmd))
				one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

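/*
 * Note: despite the name, this covers the whole kernel image mapped at
 * PAGE_OFFSET, up to __init_end (text, data and init sections).  It is
 * used below to decide which mappings must remain executable.
 */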
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

			/* Map with big pages if possible, otherwise create normal page tables. */
			if (cpu_has_pse) {
				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
			}
		}
	}
}

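/*
 * Pages in the physical range 0x70000000-0x7003ffff must not be handed
 * out on Pentium Pro parts with the RAM erratum (bad_ppro, detected by
 * ppro_with_ram_bug()); add_one_highpage_init() keeps them reserved.
 */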
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

extern int is_available_memory(efi_memory_desc_t *);

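/*
 * Return 1 if the given page frame number is usable RAM, consulting the
 * EFI memory map when booted via EFI and the BIOS e820 map otherwise.
 */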
int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	if (efi_enabled) {
		efi_memory_desc_t *md;
		void *p;

		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
			md = p;
			if (!is_available_memory(md))
				continue;
			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

			if ((pagenr >= addr) && (pagenr < end))
				return 1;
		}
		return 0;
	}

	for (i = 0; i < e820.nr_map; i++) {

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

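/*
 * Build the page tables backing the permanent kmap area starting at
 * PKMAP_BASE and cache a pointer to its pte page in pkmap_page_table
 * for later use by kmap().
 */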
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

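/*
 * Used on memory hot-add: hand a newly added highmem page to the buddy
 * allocator and update the global page accounting.
 */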
static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;
	return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assume a single node; any memory that has been added dynamically
 * and is onlined here is in HIGHMEM.
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}


#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

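/*
 * Set up the boot-time page tables in swapper_pg_dir: the lowmem direct
 * mapping, the fixmap area and (with CONFIG_HIGHMEM) the permanent kmap
 * area.
 */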
static void __init pagetable_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd_base = swapper_pg_dir;

#ifdef CONFIG_X86_PAE
	int i;
	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif

	/* Enable PSE if available */
	if (cpu_has_pse) {
		set_in_cr4(X86_CR4_PSE);
	}

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, pgd_base);

	permanent_kmaps_init(pgd_base);

#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&pgd_base[0], pgd_base[USER_PTRS_PER_PGD]);
#endif
}

#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

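/*
 * Remove the identity mapping of low memory that was only needed to get
 * secondary CPUs through their real-mode startup; a copy is saved in
 * swsusp_pg_dir first so suspend-to-disk can restore it on resume.
 */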
void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}

static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else if (!strcmp(str, "off")) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	} else
		return -EINVAL;

	return 0;
}
early_param("noexec", noexec_setup);

int nx_enabled = 0;
#ifdef CONFIG_X86_PAE

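/*
 * Query CPUID leaf 0x80000001 for the NX (execute-disable) feature and,
 * unless "noexec=off" was given, turn it on in EFER and allow _PAGE_NX
 * in __supported_pte_mask.
 */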
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec_kernel(*pte))
		ret = 0;

	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	pte_update_defer(&init_mm, vaddr, pte);
	__flush_tlb_all();
out:
	return ret;
}

#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

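/*
 * mem_init() releases boot memory to the page allocator, frees highmem,
 * accounts for reserved pages, prints the memory and virtual layout
 * summary and finally runs the WP-bit test.
 */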
void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
			PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
	       "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
	       "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
	       "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
	       "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	       "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	       "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
	       FIXADDR_START, FIXADDR_TOP,
	       (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
	       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
	       (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

	       VMALLOC_START, VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       (unsigned long)__va(0), (unsigned long)high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       (unsigned long)&__init_begin, (unsigned long)&__init_end,
	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,

	       (unsigned long)&_etext, (unsigned long)&_edata,
	       ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

	       (unsigned long)&_text, (unsigned long)&_etext,
	       ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START > VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

/*
 * This is for the non-NUMA, single-node SMP system case.
 * Specifically, in the case of x86, we will always add
 * memory to highmem for now.
 */
#ifdef CONFIG_MEMORY_HOTPLUG
#ifndef CONFIG_NEED_MULTIPLE_NODES
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = &contig_page_data;
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif
#endif

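/*
 * Slab caches for page directory and (PAE only) page middle directory
 * allocations; the pgd_ctor/pgd_dtor and pmd_ctor callbacks initialize
 * newly allocated entries.
 */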
struct kmem_cache *pgd_cache;
struct kmem_cache *pmd_cache;

void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					PTRS_PER_PMD*sizeof(pmd_t),
					PTRS_PER_PMD*sizeof(pmd_t),
					0,
					pmd_ctor,
					NULL);
		if (!pmd_cache)
			panic("pgtable_cache_init(): cannot create pmd cache");
	}
	pgd_cache = kmem_cache_create("pgd",
				PTRS_PER_PGD*sizeof(pgd_t),
				PTRS_PER_PGD*sizeof(pgd_t),
				0,
				pgd_ctor,
				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA

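/*
 * Write-protect the kernel's read-only data by remapping every page
 * between __start_rodata and __end_rodata as PAGE_KERNEL_RO
 * (CONFIG_DEBUG_RODATA only).
 */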
void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %uk\n",
	       (__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

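/*
 * Return the pages in [begin, end) to the page allocator, poisoning them
 * with POISON_FREE_INITMEM first so stale references to freed init
 * memory are easier to spot.
 */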
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif