resource: allow MMIO exclusivity for device drivers
[deliverable/linux.git] arch/x86/mm/init_32.c
/*
 *
 * Copyright (C) 1995  Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);


static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static int __initdata after_init_bootmem;

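/*
 * Early pagetable allocator: hands out one zeroed page at a time from
 * the physically contiguous window [table_start, table_top) reserved
 * by find_early_table_space() below. Only usable before the bootmem
 * allocator is up (see after_init_bootmem).
 */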
static __init void *alloc_low_page(void)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (pfn >= table_top)
                panic("alloc_low_page: ran out of memory");

        adr = __va(pfn * PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_init_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

                if (after_init_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                        if (!page_table)
                                page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                } else
                        page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
                                                unsigned long start_pfn,
                                                unsigned long end_pfn,
                                                int use_pse)
{
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        /*
         * First iteration will setup identity mapping using large/small pages
         * based on use_pse, with other attributes same as set by
         * the early code in head_32.S
         *
         * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
         * as desired for the kernel identity mapping.
         *
         * This two pass mechanism conforms to the TLB app note which says:
         *
         *     "Software should not write to a paging-structure entry in a way
         *      that would change, for any linear address, both the page size
         *      and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!cpu_has_pse)
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1)
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * update direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * local global flush tlb, which will flush the previous
                 * mappings present in both small and large page TLB's.
                 */
                __flush_tlb_all();

                /*
                 * Second iteration will set the actual desired PTE attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI MMIO resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

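/*
 * kmap bootstrap: kmap_pte caches the pte backing the first fixmap-based
 * kmap slot (FIX_KMAP_BEGIN), so the atomic kmap code can index from it
 * directly instead of walking the pagetables on every mapping.
 */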
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
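/*
 * Set up the persistent kmap area: populate the page tables covering
 * PKMAP_BASE..PKMAP_BASE + LAST_PKMAP pages and cache the pte page in
 * pkmap_page_table for kmap()/kunmap().
 */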
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

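/*
 * Release one highmem page to the buddy allocator: drop the bootmem
 * reservation, reset the refcount, and free it.
 */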
static void __init add_one_highpage_init(struct page *page, int pfn)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
};

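/*
 * Per-active-region worker: clip the region [start_pfn, end_pfn) to the
 * window requested via add_highpages_data and feed every valid pfn in
 * the intersection to add_one_highpage_init().
 */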
static int __init add_highpages_work_fn(unsigned long start_pfn,
                                        unsigned long end_pfn, void *datax)
{
        int node_pfn;
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;

        data = (struct add_highpages_data *)datax;

        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);
        if (final_start_pfn >= final_end_pfn)
                return 0;

        for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
             node_pfn++) {
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
                add_one_highpage_init(page, node_pfn);
        }

        return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        struct add_highpages_data data;

        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;

        work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
        add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

        totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
static inline void set_highmem_pages_init(void)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        early_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
        int i;

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);

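/*
 * Probe CPUID leaf 0x80000001: EDX bit 20 advertises NX (Execute
 * Disable). If present and not disabled via "noexec=off", enable it in
 * EFER and allow _PAGE_NX in the supported pte mask.
 */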
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */

        /* max_low_pfn is 0, we already have early_res support */

        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
                if (highmem_pages == -1)
                        highmem_pages = max_pfn - MAXMEM_PFN;
                if (highmem_pages + MAXMEM_PFN < max_pfn)
                        max_pfn = MAXMEM_PFN + highmem_pages;
                if (highmem_pages + MAXMEM_PFN > max_pfn) {
                        printk(KERN_WARNING "only %luMB highmem pages "
                                "available, ignoring highmem size of %uMB.\n",
                                pages_to_mb(max_pfn - MAXMEM_PFN),
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
                /* Maximum memory usable is what is directly addressable */
                printk(KERN_WARNING "Warning only %ldMB will be used.\n",
                                        MAXMEM>>20);
                if (max_pfn > MAX_NONPAE_PFN)
                        printk(KERN_WARNING
                                "Use a HIGHMEM64G enabled kernel.\n");
                else
                        printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
                max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
                if (max_pfn > MAX_NONPAE_PFN) {
                        max_pfn = MAX_NONPAE_PFN;
                        printk(KERN_WARNING "Warning only 4GB will be used. "
                                "Use a HIGHMEM64G enabled kernel.\n");
                }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
        } else {
                if (highmem_pages == -1)
                        highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
                if (highmem_pages >= max_pfn) {
                        printk(KERN_ERR "highmem size specified (%uMB) is "
                                "bigger than pages available (%luMB)!\n",
                                pages_to_mb(highmem_pages),
                                pages_to_mb(max_pfn));
                        highmem_pages = 0;
                }
                if (highmem_pages) {
                        if (max_low_pfn - highmem_pages <
                            64*1024*1024/PAGE_SIZE) {
                                printk(KERN_ERR "highmem size %uMB results in "
                                        "smaller than 64MB lowmem, ignoring it.\n",
                                        pages_to_mb(highmem_pages));
                                highmem_pages = 0;
                        }
                        max_low_pfn -= highmem_pages;
                }
#else
                if (highmem_pages)
                        printk(KERN_ERR "ignoring highmem size on non-highmem"
                                        " kernel!\n");
#endif
        }
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
                         unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memory_present(0, 0, highend_pfn);
        e820_register_active_regions(0, 0, highend_pfn);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        memory_present(0, 0, max_low_pfn);
        e820_register_active_regions(0, 0, max_low_pfn);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

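/*
 * Record the maximum pfn of each zone (DMA below MAX_DMA_ADDRESS,
 * NORMAL up to max_low_pfn, HIGHMEM above that) and let
 * free_area_init_nodes() size the zones from the active pfn ranges.
 */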
static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);
}

void __init setup_bootmem_allocator(void)
{
        int i;
        unsigned long bootmap_size, bootmap;
        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
                                 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         min_low_pfn, max_low_pfn);
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: %08lx - %08lx\n",
                min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
        printk(KERN_INFO "  bootmap %08lx - %08lx\n",
                bootmap, bootmap + bootmap_size);
        for_each_online_node(i)
                free_bootmem_with_active_regions(i, max_low_pfn);
        early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);

        after_init_bootmem = 1;
}

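/*
 * Size and reserve the physically contiguous window used for the early
 * direct-mapping pagetables: enough pages to hold every pud, pmd and
 * pte entry covering [0, end) (full pte coverage when PSE is off, only
 * the unaligned tail when it is on), plus two pages for the fixmap.
 * The window is carved out of e820 RAM starting at 0x7000.
 */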
static void __init find_early_table_space(unsigned long end, int use_pse)
{
        unsigned long puds, pmds, ptes, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = PAGE_ALIGN(puds * sizeof(pud_t));

        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables += PAGE_ALIGN(pmds * sizeof(pmd_t));

        if (use_pse) {
                unsigned long extra;

                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
                extra += PMD_SIZE;
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

        tables += PAGE_ALIGN(ptes * sizeof(pte_t));

        /* for fixmap */
        tables += PAGE_SIZE * 2;

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x7000;
        table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
                                        tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
        table_top = table_start + (tables>>PAGE_SHIFT);

        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}

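/*
 * Set up the kernel's direct mapping of [start, end) in up to three
 * pieces: an unaligned head mapped with 4k pages, a PMD-aligned middle
 * mapped with large pages when use_pse allows, and an unaligned tail
 * mapped with 4k pages again. Returns the first unmapped pfn
 * (end >> PAGE_SHIFT). The first 2/4MB are deliberately kept out of
 * large pages; see the MTRR note below.
 */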
unsigned long __init_refok init_memory_mapping(unsigned long start,
                                               unsigned long end)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long start_pfn, end_pfn;
        unsigned long big_page_start;
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
         * large pages into small in interrupt context, etc.
         */
        int use_pse = 0;
#else
        int use_pse = cpu_has_pse;
#endif

        /*
         * Find space for the kernel direct mapping tables.
         */
        if (!after_init_bootmem)
                find_early_table_space(end, use_pse);

#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }

        /*
         * Don't use a large page for the first 2/4MB of memory
         * because there are often fixed size MTRRs in there
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
        big_page_start = PMD_SIZE;

        if (start < big_page_start) {
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
        } else {
                /* head is not big-page aligned? */
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                                 << (PMD_SHIFT - PAGE_SHIFT);
        }
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);

        /* big page range */
        start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < (big_page_start >> PAGE_SHIFT))
                start_pfn = big_page_start >> PAGE_SHIFT;
        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
                                             use_pse);

        /* tail is not big-page aligned? */
        start_pfn = end_pfn;
        if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
                end_pfn = end >> PAGE_SHIFT;
                if (start_pfn < end_pfn)
                        kernel_physical_mapping_init(pgd_base, start_pfn,
                                                     end_pfn, 0);
        }

        early_ioremap_page_table_range_init(pgd_base);

        load_cr3(swapper_pg_dir);

        __flush_tlb_all();

        if (!after_init_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                                table_end << PAGE_SHIFT, "PGTABLE");

        if (!after_init_bootmem)
                early_memtest(start, end);

        return end >> PAGE_SHIFT;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        save_pg_dir();
        zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
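/*
 * Memory hotplug: on 32-bit, hot-added memory is onlined into
 * ZONE_HIGHMEM, since the low-memory direct map is fixed at boot.
 */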
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

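/*
 * Write-protect the kernel: first .text (skipped when dynamic ftrace
 * needs to patch it), then the read-only data up to __end_rodata. The
 * CPA_DEBUG blocks exercise change_page_attr() by flipping the
 * protections back and forth.
 */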
void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
        /* Dynamic tracing modifies the kernel text section */
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If debugging page accesses then do not free these pages but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        unsigned long addr;

        /*
         * We just marked the kernel text read only above, now that
         * we are going to free part of that, we need to make that
         * writeable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

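/*
 * Thin wrapper so shared callers of reserve_bootmem_generic() work on
 * 32-bit too; plain reserve_bootmem() is sufficient here.
 */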
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}