[PATCH] x86_64: add __meminit for memory hotplug
arch/i386/mm/init.c
/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

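/* 128 << 20 = 128 MB of kernel virtual address space reserved for vmalloc. */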
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. In non-PAE mode this simply
 * returns the pgd entry itself, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		if (page_table != pte_offset_kernel(pmd, 0))
			BUG();

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, wherever page tables are missing in
 * the given range.
 */

/*
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

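	/*
	 * Each pmd entry covers PMD_SIZE of virtual space: 4 MB when the
	 * pmd level is folded (non-PAE), 2 MB with PAE.
	 */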
	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		if (pgd_none(*pgd))
			one_md_table_init(pgd);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			if (pmd_none(*pmd))
				one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

			/* Map with big pages if possible, otherwise create normal page tables. */
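			/*
			 * One PSE mapping replaces PTRS_PER_PTE small pages,
			 * hence the pfn += PTRS_PER_PTE step below: 4 MB per
			 * pmd entry without PAE, 2 MB with PAE.
			 */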
			if (cpu_has_pse) {
				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
			}
		}
	}
}

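/*
 * Pentium Pro Erratum #50 (see ppro_with_ram_bug()): pages 0x70000-0x7003f,
 * i.e. physical addresses 0x70000000-0x7003ffff, must stay reserved on
 * affected CPUs.
 */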
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

extern int is_available_memory(efi_memory_desc_t *);

int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	if (efi_enabled) {
		efi_memory_desc_t *md;
		void *p;

		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
			md = p;
			if (!is_available_memory(md))
				continue;
			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

			if ((pagenr >= addr) && (pagenr < end))
				return 1;
		}
		return 0;
	}

	for (i = 0; i < e820.nr_map; i++) {

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSes report areas as RAM that
		 * are not. Notably the 640 KB - 1 MB area. We need a
		 * sanity check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __meminit free_new_highpage(struct page *page)
{
	set_page_count(page, 1);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;
	return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assume a single node and that all memory added dynamically
 * and onlined here ends up in HIGHMEM.
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

static void __init pagetable_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd_base = swapper_pg_dir;

#ifdef CONFIG_X86_PAE
	int i;
	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif

	/* Enable PSE if available */
	if (cpu_has_pse) {
		set_in_cr4(X86_CR4_PSE);
	}

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, pgd_base);

	permanent_kmaps_init(pgd_base);

#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&pgd_base[0], pgd_base[USER_PTRS_PER_PGD]);
#endif
}

#ifdef CONFIG_SOFTWARE_SUSPEND
/*
 * Swap suspend & friends need this for resume because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
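		/* '1 +' sets _PAGE_PRESENT: with PAE the entry stays present
		 * but points at the empty zero page instead of being cleared. */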
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}

static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
void __init noexec_setup(const char *str)
{
	if (!strncmp(str, "on", 2) && cpu_has_nx) {
		__supported_pte_mask |= _PAGE_NX;
		disable_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
}

int nx_enabled = 0;
#ifdef CONFIG_X86_PAE

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
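		/* EDX bit 20 of CPUID leaf 0x80000001 advertises NX support. */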
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec_kernel(*pte))
		ret = 0;

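	/*
	 * NX is bit 63 of a PAE pte; pte_high holds bits 32-63, so toggle
	 * bit (_PAGE_BIT_NX - 32) = 31 of the high word.
	 */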
	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	__flush_tlb_all();
out:
	return ret;
}

#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386s
 * and also on some strange 486s (NexGen etc.). All 586+ CPUs are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */

static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
	num_physpages = highend_pfn;
#else
	num_physpages = max_low_pfn;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

#ifdef CONFIG_HIGHMEM
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

/*
 * This is the non-NUMA, single-node SMP case. On i386 we always
 * add hot-plugged memory to highmem for now.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
int add_memory(u64 start, u64 size)
{
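	/* Hot-added memory goes into the last zone of node 0, which is
	 * ZONE_HIGHMEM on i386. */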
	struct pglist_data *pgdata = &contig_page_data;
	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif

kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;

void __init pgtable_cache_init(void)
{
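	/* PTRS_PER_PMD > 1 only with PAE; otherwise the pmd level is folded
	 * into the pgd and no separate pmd cache is needed. */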
	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					PTRS_PER_PMD*sizeof(pmd_t),
					PTRS_PER_PMD*sizeof(pmd_t),
					0,
					pmd_ctor,
					NULL);
		if (!pmd_cache)
			panic("pgtable_cache_init(): cannot create pmd cache");
	}
	pgd_cache = kmem_cache_create("pgd",
				PTRS_PER_PGD*sizeof(pgd_t),
				PTRS_PER_PGD*sizeof(pgd_t),
				0,
				pgd_ctor,
				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

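	/*
	 * flag starts out as 1 (the "2" (1) input below). Writing back to
	 * the read-only FIX_WP_TEST page faults if WP is honoured; the
	 * exception fixup resumes at 2:, skipping the xorl, so flag stays 1.
	 * If the write silently succeeds, the xorl clears flag to 0.
	 */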
	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
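		/* Poison with 0xcc (the x86 int3 opcode) so stray jumps into
		 * freed init code trap immediately. */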
		memset((void *)addr, 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
}

#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)&__start_rodata;

	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
			(unsigned long)(&__end_rodata - &__start_rodata) >> 10);

	/*
	 * change_page_attr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif