From 0889eba5b38f66d7d892a167d88284daddd3d43b Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Tue, 16 Oct 2007 01:24:15 -0700
Subject: [PATCH] x86_64: SPARSEMEM_VMEMMAP 2M page size support

x86_64 uses 2M page table entries to map its 1-1 kernel space.  We also
implement the virtual memmap using 2M page table entries.  So there is no
additional runtime overhead over FLATMEM; only initialisation is slightly
more complex.  As FLATMEM still references memory to obtain the mem_map
pointer and SPARSEMEM_VMEMMAP uses a compile-time constant,
SPARSEMEM_VMEMMAP should be superior.

With this, SPARSEMEM becomes the most efficient way of handling
virt_to_page, pfn_to_page and friends for UP, SMP and NUMA on x86_64.

[apw@shadowen.org: code resplit, style fixups]
[apw@shadowen.org: vmemmap x86_64: ensure end of section memmap is initialised]
Signed-off-by: Christoph Lameter
Signed-off-by: Andy Whitcroft
Acked-by: Mel Gorman
Cc: Andi Kleen
Cc: KAMEZAWA Hiroyuki
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 Documentation/x86_64/mm.txt  |  1 +
 arch/x86/mm/init_64.c        | 45 ++++++++++++++++++++++++++++++++++++
 arch/x86_64/Kconfig          |  1 +
 include/asm-x86/page_64.h    |  1 +
 include/asm-x86/pgtable_64.h |  1 +
 5 files changed, 49 insertions(+)

diff --git a/Documentation/x86_64/mm.txt b/Documentation/x86_64/mm.txt
index f42798ed1c54..b89b6d2bebfa 100644
--- a/Documentation/x86_64/mm.txt
+++ b/Documentation/x86_64/mm.txt
@@ -9,6 +9,7 @@ ffff800000000000 - ffff80ffffffffff (=40 bits) guard hole
 ffff810000000000 - ffffc0ffffffffff (=46 bits) direct mapping of all phys. memory
 ffffc10000000000 - ffffc1ffffffffff (=40 bits) hole
 ffffc20000000000 - ffffe1ffffffffff (=45 bits) vmalloc/ioremap space
+ffffe20000000000 - ffffe2ffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffffff80000000 - ffffffff82800000 (=40 MB)   kernel text mapping, from phys 0
 ... unused hole ...
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 458893b376f8..7d4fc633a9c9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -748,3 +748,48 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 		return "[vsyscall]";
 	return NULL;
 }
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+/*
+ * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
+ */
+int __meminit vmemmap_populate(struct page *start_page,
+						unsigned long size, int node)
+{
+	unsigned long addr = (unsigned long)start_page;
+	unsigned long end = (unsigned long)(start_page + size);
+	unsigned long next;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	for (; addr < end; addr = next) {
+		next = pmd_addr_end(addr, end);
+
+		pgd = vmemmap_pgd_populate(addr, node);
+		if (!pgd)
+			return -ENOMEM;
+		pud = vmemmap_pud_populate(pgd, addr, node);
+		if (!pud)
+			return -ENOMEM;
+
+		pmd = pmd_offset(pud, addr);
+		if (pmd_none(*pmd)) {
+			pte_t entry;
+			void *p = vmemmap_alloc_block(PMD_SIZE, node);
+			if (!p)
+				return -ENOMEM;
+
+			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
+			mk_pte_huge(entry);
+			set_pmd(pmd, __pmd(pte_val(entry)));
+
+			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
+				addr, addr + PMD_SIZE - 1, p, node);
+		} else
+			vmemmap_verify((pte_t *)pmd, node, addr, next);
+	}
+
+	return 0;
+}
+#endif
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index cf013cb85ea4..8c83dbe4c4d0 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -409,6 +409,7 @@ config ARCH_DISCONTIGMEM_DEFAULT
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	depends on (NUMA || EXPERIMENTAL)
+	select SPARSEMEM_VMEMMAP_ENABLE
 
 config ARCH_MEMORY_PROBE
 	def_bool y
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
index 88adf1afb0a2..c3b52bcb171e 100644
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -134,6 +134,7 @@ extern unsigned long __phys_addr(unsigned long);
 	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #define __HAVE_ARCH_GATE_AREA 1
+#define vmemmap ((struct page *)VMEMMAP_START)
 
 #include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 57dd6b3107ea..a79f5355e3b0 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -137,6 +137,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 #define MAXMEM		 _AC(0x3fffffffffff, UL)
 #define VMALLOC_START	 _AC(0xffffc20000000000, UL)
 #define VMALLOC_END	 _AC(0xffffe1ffffffffff, UL)
+#define VMEMMAP_START	 _AC(0xffffe20000000000, UL)
 #define MODULES_VADDR	 _AC(0xffffffff88000000, UL)
 #define MODULES_END	 _AC(0xfffffffffff00000, UL)
 #define MODULES_LEN	 (MODULES_END - MODULES_VADDR)
-- 
2.34.1
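
Note on the virt_to_page/pfn_to_page claim in the changelog: with vmemmap
defined as a compile-time constant base, the generic __pfn_to_page()
reduces to "vmemmap + pfn", i.e. constant-plus-offset arithmetic with no
memory reference, whereas FLATMEM must first load the mem_map pointer.
The stand-alone C sketch below only illustrates that arithmetic; the
struct page layout, mem_map_base and the example values are placeholders
chosen for the demonstration, not kernel code.

#include <stdio.h>
#include <stdint.h>

struct page { uint64_t flags; void *mapping; };	/* stand-in for struct page */

/* Compile-time constant base, as reserved by this patch (VMEMMAP_START). */
#define VMEMMAP_START	0xffffe20000000000UL

/* FLATMEM keeps its base in a variable that must be loaded at run time. */
static uintptr_t mem_map_base;

static uintptr_t vmemmap_pfn_to_page(unsigned long pfn)
{
	/* constant + pfn * sizeof(struct page): pure arithmetic */
	return VMEMMAP_START + pfn * sizeof(struct page);
}

static uintptr_t flatmem_pfn_to_page(unsigned long pfn)
{
	/* same arithmetic, but only after fetching mem_map_base from memory */
	return mem_map_base + pfn * sizeof(struct page);
}

int main(void)
{
	unsigned long pfn = 0x12345;

	mem_map_base = 0xffff810001000000UL;	/* arbitrary example value */
	printf("vmemmap: pfn %#lx -> page at %#lx\n",
	       pfn, (unsigned long)vmemmap_pfn_to_page(pfn));
	printf("flatmem: pfn %#lx -> page at %#lx\n",
	       pfn, (unsigned long)flatmem_pfn_to_page(pfn));
	return 0;
}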