x86, cpa: make the kernel physical mapping initialization a two pass sequence
arch/x86/mm/init_32.c
/*
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);


static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static int __initdata after_init_bootmem;

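/*
 * Bump allocator for early page-table pages: hands out zeroed pages
 * from the range reserved by find_early_table_space(), for use before
 * the bootmem allocator is available.
 */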
static __init void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (pfn >= table_top)
                panic("alloc_low_page: ran out of memory");

        adr = __va(pfn * PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys = pfn * PAGE_SIZE;
        return adr;
}

/*
 * Create a middle page table and put a pointer to it in the given
 * page global directory entry. In non-PAE mode the middle layer is
 * folded, so this simply returns the pmd aliased by the pgd entry.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        unsigned long phys;
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_init_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page(&phys);
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

                if (after_init_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                        if (!page_table)
                                page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                } else {
                        unsigned long phys;
                        page_table = (pte_t *)alloc_low_page(&phys);
                }

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

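/*
 * Treat everything from PAGE_OFFSET up to __init_end as kernel text,
 * i.e. as a range that must keep an executable mapping.
 */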
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
                                                unsigned long start_pfn,
                                                unsigned long end_pfn,
                                                int use_pse)
{
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        /*
         * The first iteration will set up the identity mapping using
         * large/small pages based on use_pse, with the other attributes
         * the same as set by the early code in head_32.S.
         *
         * The second iteration will set up the appropriate attributes
         * (NX, GLOBAL, ...) as desired for the kernel identity mapping.
         *
         * This two-pass mechanism conforms to the TLB app note which says:
         *
         * "Software should not write to a paging-structure entry in a way
         *  that would change, for any linear address, both the page size
         *  and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!cpu_has_pse)
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * The first pass uses the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * The first pass uses the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1)
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * Update the direct mapping page count only in the
                 * first iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * Do a local global flush of the TLB, which flushes
                 * the previous mappings out of both the small and
                 * large page TLBs.
                 */
                __flush_tlb_all();

                /*
                 * The second iteration will set the actual desired
                 * PTE attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

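/* Hand one highmem page to the page allocator as free memory. */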
static void __init add_one_highpage_init(struct page *page, int pfn)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
};

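/*
 * work_with_active_regions() callback: free the highmem pages of the
 * intersection of an active region with [data->start_pfn, data->end_pfn).
 */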
static int __init add_highpages_work_fn(unsigned long start_pfn,
                                         unsigned long end_pfn, void *datax)
{
        int node_pfn;
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;

        data = (struct add_highpages_data *)datax;

        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);
        if (final_start_pfn >= final_end_pfn)
                return 0;

        for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
             node_pfn++) {
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
                add_one_highpage_init(page, node_pfn);
        }

        return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        struct add_highpages_data data;

        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;

        work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
        add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

        totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define kmap_init()                            do { } while (0)
# define permanent_kmaps_init(pgd_base)         do { } while (0)
# define set_highmem_pages_init()               do { } while (0)
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        early_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

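/*
 * Wire up the parts of the kernel pagetable not covered by
 * init_memory_mapping(): the permanent kmap page tables, bracketed by
 * the paravirt pagetable setup hooks.
 */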
static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        paravirt_pagetable_setup_start(pgd_base);

        permanent_kmaps_init(pgd_base);

        paravirt_pagetable_setup_done(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
        int i;

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);

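/*
 * Enable the NX (no-execute) feature in EFER if the CPU advertises it
 * (CPUID 0x80000001 EDX bit 20) and "noexec=off" was not given.
 */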
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */

        /* max_low_pfn is 0, we already have early_res support */

        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
                if (highmem_pages == -1)
                        highmem_pages = max_pfn - MAXMEM_PFN;
                if (highmem_pages + MAXMEM_PFN < max_pfn)
                        max_pfn = MAXMEM_PFN + highmem_pages;
                if (highmem_pages + MAXMEM_PFN > max_pfn) {
                        printk(KERN_WARNING "only %luMB highmem pages "
                                "available, ignoring highmem size of %uMB.\n",
                                pages_to_mb(max_pfn - MAXMEM_PFN),
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
                /* Maximum memory usable is what is directly addressable */
                printk(KERN_WARNING "Warning only %ldMB will be used.\n",
                                        MAXMEM>>20);
                if (max_pfn > MAX_NONPAE_PFN)
                        printk(KERN_WARNING
                                 "Use a HIGHMEM64G enabled kernel.\n");
                else
                        printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
                max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
                if (max_pfn > MAX_NONPAE_PFN) {
                        max_pfn = MAX_NONPAE_PFN;
                        printk(KERN_WARNING "Warning only 4GB will be used."
                                "Use a HIGHMEM64G enabled kernel.\n");
                }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
        } else {
                if (highmem_pages == -1)
                        highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
                if (highmem_pages >= max_pfn) {
                        printk(KERN_ERR "highmem size specified (%uMB) is "
                                "bigger than pages available (%luMB)!.\n",
                                pages_to_mb(highmem_pages),
                                pages_to_mb(max_pfn));
                        highmem_pages = 0;
                }
                if (highmem_pages) {
                        if (max_low_pfn - highmem_pages <
                            64*1024*1024/PAGE_SIZE){
                                printk(KERN_ERR "highmem size %uMB results in "
                                "smaller than 64MB lowmem, ignoring it.\n"
                                        , pages_to_mb(highmem_pages));
                                highmem_pages = 0;
                        }
                        max_low_pfn -= highmem_pages;
                }
#else
                if (highmem_pages)
                        printk(KERN_ERR "ignoring highmem size on non-highmem"
                                        " kernel!\n");
#endif
        }
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
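/*
 * Register all low (and, on highmem kernels, high) memory with the
 * core VM and set up the bootmem allocator; flat, non-NUMA variant.
 */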
void __init initmem_init(unsigned long start_pfn,
                          unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memory_present(0, 0, highend_pfn);
        e820_register_active_regions(0, 0, highend_pfn);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        memory_present(0, 0, max_low_pfn);
        e820_register_active_regions(0, 0, max_low_pfn);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

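/* Report the highest pfn of each memory zone we use to the core VM. */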
static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);
}

void __init setup_bootmem_allocator(void)
{
        int i;
        unsigned long bootmap_size, bootmap;
        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
                                 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         min_low_pfn, max_low_pfn);
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                 max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: %08lx - %08lx\n",
                 min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
        printk(KERN_INFO "  bootmap %08lx - %08lx\n",
                 bootmap, bootmap + bootmap_size);
        for_each_online_node(i)
                free_bootmem_with_active_regions(i, max_low_pfn);
        early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);

        after_init_bootmem = 1;
}

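/*
 * Estimate the worst-case amount of memory the kernel direct-mapping
 * page tables for [0, end) can need (PUDs, PMDs, PTEs for the ranges
 * mapped with 4k pages, plus two pages for the fixmap), and reserve a
 * block for them from the e820 map.
 */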
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, ptes, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = PAGE_ALIGN(puds * sizeof(pud_t));

        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables += PAGE_ALIGN(pmds * sizeof(pmd_t));

        if (cpu_has_pse) {
                unsigned long extra;

                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
                extra += PMD_SIZE;
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

        tables += PAGE_ALIGN(ptes * sizeof(pte_t));

        /* for fixmap */
        tables += PAGE_SIZE * 2;

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x7000;
        table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
                                        tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
        table_top = table_start + (tables>>PAGE_SHIFT);

        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}

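/*
 * Set up the kernel direct mapping for [start, end). The range is
 * split in three: a head that is below the first 2/4MB (kept 4k-mapped
 * because fixed-size MTRRs there can make large pages slow) or not yet
 * PMD-aligned, a PMD-aligned middle mapped with large pages when PSE
 * is available, and an unaligned tail mapped with 4k pages.
 */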
unsigned long __init_refok init_memory_mapping(unsigned long start,
                                                unsigned long end)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long start_pfn, end_pfn;
        unsigned long big_page_start;

        /*
         * Find space for the kernel direct mapping tables.
         */
        if (!after_init_bootmem)
                find_early_table_space(end);

#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }

        /*
         * Don't use a large page for the first 2/4MB of memory
         * because there are often fixed size MTRRs in there
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
        big_page_start = PMD_SIZE;

        if (start < big_page_start) {
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
        } else {
                /* head is not big page aligned? */
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                                 << (PMD_SHIFT - PAGE_SHIFT);
        }
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);

        /* big page range */
        start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < (big_page_start >> PAGE_SHIFT))
                start_pfn = big_page_start >> PAGE_SHIFT;
        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
                                             cpu_has_pse);

        /* tail is not big page aligned? */
        start_pfn = end_pfn;
        if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
                end_pfn = end >> PAGE_SHIFT;
                if (start_pfn < end_pfn)
                        kernel_physical_mapping_init(pgd_base, start_pfn,
                                                     end_pfn, 0);
        }

        early_ioremap_page_table_range_init(pgd_base);

        load_cr3(swapper_pg_dir);

        __flush_tlb_all();

        if (!after_init_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                                 table_end << PAGE_SHIFT, "PGTABLE");

        if (!after_init_bootmem)
                early_memtest(start, end);

        return end >> PAGE_SHIFT;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START > VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        cpa_init();
        save_pg_dir();
        zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
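/* Memory hotplug: hand a newly added range to ZONE_HIGHMEM of the target node. */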
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

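/*
 * Write-protect the kernel text (unless dynamic ftrace needs to patch
 * it) and the read-only data section; with CONFIG_CPA_DEBUG, flip the
 * protections back and forth once to exercise change_page_attr().
 */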
void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
        /* Dynamic tracing modifies the kernel text section */
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

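/*
 * Release the pages in [begin, end) to the page allocator. With
 * CONFIG_DEBUG_PAGEALLOC they are unmapped instead, so that any stray
 * access to freed init memory faults.
 */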
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        unsigned long addr;

        /*
         * We just marked the kernel text read only above, now that
         * we are going to free part of that, we need to make that
         * writeable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

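/* On 32-bit, generic bootmem reservation reduces to a plain reserve_bootmem(). */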
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}