Merge branch 'x86-headers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 7042d14ed8f377c0a9cc3f069fe73133242b5636..14b9dd71d9e864e218b28f82c95df8f011cc0c91 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -327,22 +327,30 @@ void __init cleanup_highmap(void)
        }
 }
 
+/*
+ * Create PTE level page table mapping for physical addresses.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
+phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
              pgprot_t prot)
 {
-       unsigned long pages = 0, next;
-       unsigned long last_map_addr = end;
+       unsigned long pages = 0, paddr_next;
+       unsigned long paddr_last = paddr_end;
+       pte_t *pte;
        int i;
 
-       pte_t *pte = pte_page + pte_index(addr);
+       pte = pte_page + pte_index(paddr);
+       i = pte_index(paddr);
 
-       for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
-               next = (addr & PAGE_MASK) + PAGE_SIZE;
-               if (addr >= end) {
+       for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
+               paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
+               if (paddr >= paddr_end) {
                        if (!after_bootmem &&
-                           !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
-                           !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
+                           !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+                                            E820_RAM) &&
+                           !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+                                            E820_RESERVED_KERN))
                                set_pte(pte, __pte(0));
                        continue;
                }
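
For readers tracing the arithmetic in the renamed loop above, here is a minimal standalone model of the 4K stepping and PTE-slot indexing. The constants and pte_index() are reimplemented here to mirror their x86-64 definitions, and the sample addresses are made up:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PTRS_PER_PTE	512UL

/* Mirrors the kernel's pte_index(): slot for an address, bits 12..20. */
static unsigned long pte_index(unsigned long addr)
{
	return (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
	unsigned long paddr = 0x100ffe000UL;	/* made-up start, 4K-aligned */
	unsigned long paddr_end = 0x101002000UL;
	unsigned long paddr_next, i;

	/* Same shape as the loop in phys_pte_init() above. */
	for (i = pte_index(paddr); i < PTRS_PER_PTE && paddr < paddr_end;
	     i++, paddr = paddr_next) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		printf("slot %3lu maps %#lx..%#lx\n",
		       i, paddr & PAGE_MASK, paddr_next - 1);
	}
	return 0;
}

With these inputs the loop fills slots 510 and 511 and then stops at the end of the PTE page even though paddr_end has not been reached; the caller, phys_pmd_init(), supplies the next PTE page for the rest of the range.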
@@ -353,54 +361,61 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
                 * pagetable pages as RO. So assume whoever pre-set up
                 * these mappings knew what they were doing.
                 */
-               if (pte_val(*pte)) {
+               if (!pte_none(*pte)) {
                        if (!after_bootmem)
                                pages++;
                        continue;
                }
 
                if (0)
-                       printk("   pte=%p addr=%lx pte=%016lx\n",
-                              pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
+                       pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
+                               pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
-               set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
-               last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
+               set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+               paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
        }
 
        update_page_count(PG_LEVEL_4K, pages);
 
-       return last_map_addr;
+       return paddr_last;
 }
 
+/*
+ * Create PMD level page table mapping for physical addresses. The virtual
+ * and physical addresses have to be aligned at this level.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask, pgprot_t prot)
 {
-       unsigned long pages = 0, next;
-       unsigned long last_map_addr = end;
+       unsigned long pages = 0, paddr_next;
+       unsigned long paddr_last = paddr_end;
 
-       int i = pmd_index(address);
+       int i = pmd_index(paddr);
 
-       for (; i < PTRS_PER_PMD; i++, address = next) {
-               pmd_t *pmd = pmd_page + pmd_index(address);
+       for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
+               pmd_t *pmd = pmd_page + pmd_index(paddr);
                pte_t *pte;
                pgprot_t new_prot = prot;
 
-               next = (address & PMD_MASK) + PMD_SIZE;
-               if (address >= end) {
+               paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
+               if (paddr >= paddr_end) {
                        if (!after_bootmem &&
-                           !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
-                           !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
+                           !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+                                            E820_RAM) &&
+                           !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+                                            E820_RESERVED_KERN))
                                set_pmd(pmd, __pmd(0));
                        continue;
                }
 
-               if (pmd_val(*pmd)) {
+               if (!pmd_none(*pmd)) {
                        if (!pmd_large(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                pte = (pte_t *)pmd_page_vaddr(*pmd);
-                               last_map_addr = phys_pte_init(pte, address,
-                                                               end, prot);
+                               paddr_last = phys_pte_init(pte, paddr,
+                                                          paddr_end, prot);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
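
The hunks above also switch the presence checks from raw pte_val()/pmd_val() tests to the pte_none()/pmd_none() predicates. A toy userspace model of why the dedicated predicate is more robust than a raw-value test; the bit layout here is invented for illustration and is not x86's actual one:

#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;

#define _PAGE_PRESENT	0x001UL
#define _PAGE_SWBIT	0x200UL	/* hypothetical software-only bit */

static unsigned long pte_val(pte_t e) { return e.pte; }

/* "none" should mean "maps nothing", ignoring bookkeeping bits. */
static bool pte_none(pte_t e)
{
	return (pte_val(e) & ~_PAGE_SWBIT) == 0;
}

int main(void)
{
	pte_t unused = { 0 };
	pte_t annotated = { _PAGE_SWBIT };	/* no mapping, one sw bit */

	/* The raw-value test misclassifies the annotated-but-empty entry: */
	printf("pte_val test:  unused=%d annotated=%d\n",
	       pte_val(unused) != 0, pte_val(annotated) != 0);
	/* The predicate gives the intended answer for both: */
	printf("pte_none test: unused=%d annotated=%d\n",
	       !pte_none(unused), !pte_none(annotated));
	return 0;
}

Testing the raw value conflates "carries any bit" with "maps something"; the *_none() helpers encode the latter, which is what these scans actually ask.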
@@ -419,7 +434,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
                                if (!after_bootmem)
                                        pages++;
-                               last_map_addr = next;
+                               paddr_last = paddr_next;
                                continue;
                        }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
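
The PG_LEVEL_2M path above keeps (or, just below, creates) a single leaf PMD entry, which is why paddr_last can jump straight to paddr_next. A small sketch of the 2M-leaf arithmetic; the constants mirror x86-64 and the sample address is arbitrary. The 1G PUD path later in the patch is analogous with PUD_MASK:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define _PAGE_PSE	0x080UL	/* "page size" bit: entry is a leaf */

int main(void)
{
	unsigned long paddr = 0x10034a000UL;	/* anywhere in the 2M region */
	unsigned long base = paddr & PMD_MASK;
	unsigned long pfn = base >> PAGE_SHIFT;

	/* One leaf PMD covers [base, base + 2M). */
	printf("2M leaf: base=%#lx pfn=%#lx covers up to %#lx\n",
	       base, pfn, base + PMD_SIZE - 1);
	printf("entry value (pfn<<12 | PSE): %#lx\n",
	       (pfn << PAGE_SHIFT) | _PAGE_PSE);
	return 0;
}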
@@ -429,51 +444,65 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
-                               pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
+                               pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
                                        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
                        spin_unlock(&init_mm.page_table_lock);
-                       last_map_addr = next;
+                       paddr_last = paddr_next;
                        continue;
                }
 
                pte = alloc_low_page();
-               last_map_addr = phys_pte_init(pte, address, end, new_prot);
+               paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
 
                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, pte);
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
-       return last_map_addr;
+       return paddr_last;
 }
 
+/*
+ * Create PUD level page table mapping for physical addresses. The virtual
+ * and physical addresses do not have to be aligned at this level. KASLR can
+ * randomize virtual addresses up to this level.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
-                        unsigned long page_size_mask)
+phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
+             unsigned long page_size_mask)
 {
-       unsigned long pages = 0, next;
-       unsigned long last_map_addr = end;
-       int i = pud_index(addr);
+       unsigned long pages = 0, paddr_next;
+       unsigned long paddr_last = paddr_end;
+       unsigned long vaddr = (unsigned long)__va(paddr);
+       int i = pud_index(vaddr);
 
-       for (; i < PTRS_PER_PUD; i++, addr = next) {
-               pud_t *pud = pud_page + pud_index(addr);
+       for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
+               pud_t *pud;
                pmd_t *pmd;
                pgprot_t prot = PAGE_KERNEL;
 
-               next = (addr & PUD_MASK) + PUD_SIZE;
-               if (addr >= end) {
+               vaddr = (unsigned long)__va(paddr);
+               pud = pud_page + pud_index(vaddr);
+               paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+
+               if (paddr >= paddr_end) {
                        if (!after_bootmem &&
-                           !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
-                           !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
+                           !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+                                            E820_RAM) &&
+                           !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+                                            E820_RESERVED_KERN))
                                set_pud(pud, __pud(0));
                        continue;
                }
 
-               if (pud_val(*pud)) {
+               if (!pud_none(*pud)) {
                        if (!pud_large(*pud)) {
                                pmd = pmd_offset(pud, 0);
-                               last_map_addr = phys_pmd_init(pmd, addr, end,
-                                                        page_size_mask, prot);
+                               paddr_last = phys_pmd_init(pmd, paddr,
+                                                          paddr_end,
+                                                          page_size_mask,
+                                                          prot);
                                __flush_tlb_all();
                                continue;
                        }
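
The substantive change in this hunk is that the PUD slot is now chosen by the virtual address, pud_index(vaddr), rather than the physical one. With a fixed direct-map base the two indices happen to coincide, but once KASLR randomizes the base they diverge. A standalone model of that divergence; both base addresses below are made up, 1G-aligned as the PUD level requires:

#include <stdio.h>

#define PUD_SHIFT	30
#define PTRS_PER_PUD	512UL

/* Mirrors the kernel's pud_index(). */
static unsigned long pud_index(unsigned long vaddr)
{
	return (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

/* __va() is just base + paddr for the direct map. */
static unsigned long va(unsigned long base, unsigned long paddr)
{
	return base + paddr;
}

int main(void)
{
	unsigned long paddr = 0x40000000UL;		/* 1G */
	unsigned long fixed = 0xffff880000000000UL;	/* classic base */
	unsigned long kaslr = 0xffff9a4000000000UL;	/* example randomized base */

	printf("pud_index(paddr)           = %lu\n", pud_index(paddr));
	printf("pud_index(__va), fixed     = %lu\n", pud_index(va(fixed, paddr)));
	printf("pud_index(__va), randomized= %lu\n", pud_index(va(kaslr, paddr)));
	return 0;
}

With the classic base the physical and virtual indices both come out as 1; with the randomized base the virtual index lands in a different slot entirely, so indexing by paddr would corrupt the walk.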
@@ -492,7 +521,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
                                if (!after_bootmem)
                                        pages++;
-                               last_map_addr = next;
+                               paddr_last = paddr_next;
                                continue;
                        }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
@@ -502,16 +531,16 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
-                               pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
+                               pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
                                        PAGE_KERNEL_LARGE));
                        spin_unlock(&init_mm.page_table_lock);
-                       last_map_addr = next;
+                       paddr_last = paddr_next;
                        continue;
                }
 
                pmd = alloc_low_page();
-               last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
-                                             prot);
+               paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
+                                          page_size_mask, prot);
 
                spin_lock(&init_mm.page_table_lock);
                pud_populate(&init_mm, pud, pmd);
@@ -521,38 +550,44 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
        update_page_count(PG_LEVEL_1G, pages);
 
-       return last_map_addr;
+       return paddr_last;
 }
 
+/*
+ * Create page table mappings for a range of physical addresses. The virtual
+ * and physical addresses have to be aligned at the PMD level.
+ * It returns the last physical address mapped.
+ */
 unsigned long __meminit
-kernel_physical_mapping_init(unsigned long start,
-                            unsigned long end,
+kernel_physical_mapping_init(unsigned long paddr_start,
+                            unsigned long paddr_end,
                             unsigned long page_size_mask)
 {
        bool pgd_changed = false;
-       unsigned long next, last_map_addr = end;
-       unsigned long addr;
+       unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
 
-       start = (unsigned long)__va(start);
-       end = (unsigned long)__va(end);
-       addr = start;
+       paddr_last = paddr_end;
+       vaddr = (unsigned long)__va(paddr_start);
+       vaddr_end = (unsigned long)__va(paddr_end);
+       vaddr_start = vaddr;
 
-       for (; start < end; start = next) {
-               pgd_t *pgd = pgd_offset_k(start);
+       for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+               pgd_t *pgd = pgd_offset_k(vaddr);
                pud_t *pud;
 
-               next = (start & PGDIR_MASK) + PGDIR_SIZE;
+               vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
 
                if (pgd_val(*pgd)) {
                        pud = (pud_t *)pgd_page_vaddr(*pgd);
-                       last_map_addr = phys_pud_init(pud, __pa(start),
-                                                __pa(end), page_size_mask);
+                       paddr_last = phys_pud_init(pud, __pa(vaddr),
+                                                  __pa(vaddr_end),
+                                                  page_size_mask);
                        continue;
                }
 
                pud = alloc_low_page();
-               last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
-                                                page_size_mask);
+               paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
+                                          page_size_mask);
 
                spin_lock(&init_mm.page_table_lock);
                pgd_populate(&init_mm, pgd, pud);
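
The rewritten outer loop walks virtual addresses in PGDIR_SIZE steps and converts back with __pa() when descending a level. A minimal model of that walk; va()/pa() stand in for the kernel's __va()/__pa(), and the base and range are made up:

#include <stdio.h>

#define PGDIR_SHIFT	39
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

/* Stand-ins for __va()/__pa() with an assumed direct-map base. */
static const unsigned long base = 0xffff880000000000UL;
static unsigned long va(unsigned long paddr) { return base + paddr; }
static unsigned long pa(unsigned long vaddr) { return vaddr - base; }

int main(void)
{
	unsigned long paddr_start = 0x0UL;
	unsigned long paddr_end = 0x13000000000UL;	/* made-up, ~1.2 TB */
	unsigned long vaddr = va(paddr_start);
	unsigned long vaddr_end = va(paddr_end);
	unsigned long vaddr_next, clamp;

	/* Same shape as the loop in kernel_physical_mapping_init(). */
	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
		clamp = vaddr_next < vaddr_end ? vaddr_next : vaddr_end;
		/* Each pass covers one PGD slot; the PUD level below sees
		 * physical addresses again, via __pa(). */
		printf("pgd slot: va %#lx -> pa %#lx..%#lx\n",
		       vaddr, pa(vaddr), pa(clamp) - 1);
	}
	return 0;
}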
@@ -561,11 +596,11 @@ kernel_physical_mapping_init(unsigned long start,
        }
 
        if (pgd_changed)
-               sync_global_pgds(addr, end - 1, 0);
+               sync_global_pgds(vaddr_start, vaddr_end - 1, 0);
 
        __flush_tlb_all();
 
-       return last_map_addr;
+       return paddr_last;
 }
 
 #ifndef CONFIG_NUMA
@@ -672,7 +707,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
 
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte = pte_start + i;
-               if (pte_val(*pte))
+               if (!pte_none(*pte))
                        return;
        }
 
@@ -690,7 +725,7 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 
        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd = pmd_start + i;
-               if (pmd_val(*pmd))
+               if (!pmd_none(*pmd))
                        return;
        }
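
free_pte_table() and free_pmd_table() share one pattern: scan all 512 slots, bail out on the first non-none entry, and only when the whole table is empty free the page and clear the parent entry. A simplified userspace sketch of that pattern, with a plain array standing in for a page-table page and no locking:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define PTRS_PER_PTE	512

/* Stand-in for the scan above: any non-none slot keeps the table
 * alive. A plain nonzero word models !pte_none() here. */
static bool table_empty(const unsigned long *tbl)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		if (tbl[i])
			return false;
	return true;
}

int main(void)
{
	unsigned long *pte_page = calloc(PTRS_PER_PTE, sizeof(*pte_page));
	unsigned long parent;

	if (!pte_page)
		return 1;
	parent = (unsigned long)pte_page;	/* models the pmd entry */

	if (table_empty(pte_page)) {
		/* In the kernel this is free_pagetable() + pmd_clear(),
		 * done under init_mm.page_table_lock. */
		free(pte_page);
		parent = 0;
	}
	printf("parent entry after scan: %#lx\n", parent);
	return 0;
}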
 
@@ -701,27 +736,6 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
        spin_unlock(&init_mm.page_table_lock);
 }
 
-/* Return true if pgd is changed, otherwise return false. */
-static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
-{
-       pud_t *pud;
-       int i;
-
-       for (i = 0; i < PTRS_PER_PUD; i++) {
-               pud = pud_start + i;
-               if (pud_val(*pud))
-                       return false;
-       }
-
-       /* free a pud table */
-       free_pagetable(pgd_page(*pgd), 0);
-       spin_lock(&init_mm.page_table_lock);
-       pgd_clear(pgd);
-       spin_unlock(&init_mm.page_table_lock);
-
-       return true;
-}
-
 static void __meminit
 remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                 bool direct)
@@ -912,7 +926,6 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
        unsigned long addr;
        pgd_t *pgd;
        pud_t *pud;
-       bool pgd_changed = false;
 
        for (addr = start; addr < end; addr = next) {
                next = pgd_addr_end(addr, end);
@@ -923,13 +936,8 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
 
                pud = (pud_t *)pgd_page_vaddr(*pgd);
                remove_pud_table(pud, addr, next, direct);
-               if (free_pud_table(pud, pgd))
-                       pgd_changed = true;
        }
 
-       if (pgd_changed)
-               sync_global_pgds(start, end - 1, 1);
-
        flush_tlb_all();
 }
 