x86-32, numa: Reorganize calculate_numa_remap_pages()
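
Break the per-node body of calculate_numa_remap_pages() out into a new
helper, init_alloc_remap(), which sets up the remap area for a single
node and returns the number of KVA pages it reserved.  initmem_init()
now walks the online nodes itself, passing the running total as each
node's remap offset, accumulating reserve_pages, and rounding the sum
up to kva_pages.  Apart from the continue statements becoming early
returns, the per-node logic is unchanged.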
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 60701a5e0de005b1a4bfe1fd8d58ba063188958f..5039e9b21d9e3ae94aa11a772c2c525c18ca5e8b 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -264,70 +264,64 @@ void resume_map_numa_kva(pgd_t *pgd_base)
 }
 #endif
 
-static __init unsigned long calculate_numa_remap_pages(void)
+static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
 {
-       int nid;
-       unsigned long size, reserve_pages = 0;
+       unsigned long size;
+       u64 node_kva;
 
-       for_each_online_node(nid) {
-               u64 node_kva;
-
-               /*
-                * The acpi/srat node info can show hot-add memroy zones
-                * where memory could be added but not currently present.
-                */
-               printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
-                       nid, node_start_pfn[nid], node_end_pfn[nid]);
-               if (node_start_pfn[nid] > max_pfn)
-                       continue;
-               if (!node_end_pfn[nid])
-                       continue;
-               if (node_end_pfn[nid] > max_pfn)
-                       node_end_pfn[nid] = max_pfn;
-
-               /* ensure the remap includes space for the pgdat. */
-               size = node_remap_size[nid];
-               size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
-
-               /* convert size to large (pmd size) pages, rounding up */
-               size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
-               /* now the roundup is correct, convert to PAGE_SIZE pages */
-               size = size * PTRS_PER_PTE;
-
-               node_kva = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
-                                       ((u64)node_end_pfn[nid])<<PAGE_SHIFT,
-                                       ((u64)size)<<PAGE_SHIFT,
-                                       LARGE_PAGE_BYTES);
-               if (node_kva == MEMBLOCK_ERROR)
-                       panic("Can not get kva ram\n");
-
-               node_remap_size[nid] = size;
-               node_remap_offset[nid] = reserve_pages;
-               reserve_pages += size;
-               printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
-                                 " node %d at %llx\n",
-                               size, nid, node_kva >> PAGE_SHIFT);
-
-               /*
-                *  prevent kva address below max_low_pfn want it on system
-                *  with less memory later.
-                *  layout will be: KVA address , KVA RAM
-                *
-                *  we are supposed to only record the one less then max_low_pfn
-                *  but we could have some hole in high memory, and it will only
-                *  check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide
-                *  to use it as free.
-                *  So memblock_x86_reserve_range here, hope we don't run out of that array
-                */
-               memblock_x86_reserve_range(node_kva,
-                                          node_kva + (((u64)size)<<PAGE_SHIFT),
-                                          "KVA RAM");
-
-               node_remap_start_pfn[nid] = node_kva >> PAGE_SHIFT;
-       }
-       printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
-                       reserve_pages);
-       return reserve_pages;
+       /*
+        * The acpi/srat node info can show hot-add memory zones where
+        * memory could be added but is not currently present.
+        */
+       printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
+              nid, node_start_pfn[nid], node_end_pfn[nid]);
+       if (node_start_pfn[nid] > max_pfn)
+               return 0;
+       if (!node_end_pfn[nid])
+               return 0;
+       if (node_end_pfn[nid] > max_pfn)
+               node_end_pfn[nid] = max_pfn;
+
+       /* ensure the remap includes space for the pgdat. */
+       size = node_remap_size[nid];
+       size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
+
+       /* convert size to large (pmd size) pages, rounding up */
+       size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
+       /* now the roundup is correct, convert to PAGE_SIZE pages */
+       size = size * PTRS_PER_PTE;
+
+       node_kva = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
+                                         (u64)node_end_pfn[nid] << PAGE_SHIFT,
+                                         (u64)size << PAGE_SHIFT,
+                                         LARGE_PAGE_BYTES);
+       if (node_kva == MEMBLOCK_ERROR)
+               panic("Can not get kva ram\n");
+
+       node_remap_size[nid] = size;
+       node_remap_offset[nid] = offset;
+       printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of node %d at %llx\n",
+              size, nid, node_kva >> PAGE_SHIFT);
+
+       /*
+        * Prevent the KVA space from sitting below max_low_pfn; we
+        * want that memory available on systems with less memory
+        * later.  The layout will be: KVA address, KVA RAM.
+        *
+        * We are supposed to record only ranges below max_low_pfn,
+        * but there can be holes in high memory, and whether a page
+        * is usable as free is decided only by page_is_ram(pfn) &&
+        * !page_is_reserved_early(pfn).  So memblock_x86_reserve_range()
+        * the area here, and hope we don't run out of that array.
+        */
+       memblock_x86_reserve_range(node_kva,
+                                  node_kva + ((u64)size << PAGE_SHIFT),
+                                  "KVA RAM");
+
+       node_remap_start_pfn[nid] = node_kva >> PAGE_SHIFT;
+
+       return size;
 }
 
 static void init_remap_allocator(int nid)
@@ -346,6 +340,7 @@ static void init_remap_allocator(int nid)
 
 void __init initmem_init(void)
 {
+       unsigned long reserve_pages = 0;
        int nid;
 
        /*
@@ -359,7 +354,11 @@ void __init initmem_init(void)
        get_memcfg_numa();
        numa_init_array();
 
-       kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
+       for_each_online_node(nid)
+               reserve_pages += init_alloc_remap(nid, reserve_pages);
+       kva_pages = roundup(reserve_pages, PTRS_PER_PTE);
+       printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
+                       reserve_pages);
 
        kva_start_pfn = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
                                max_low_pfn << PAGE_SHIFT,
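
The size computation in init_alloc_remap() above first rounds the remap
area (memmap plus pgdat) up to whole large (PMD-sized) pages, then
expresses the result in 4 KiB pages so it can be mapped with large
mappings.  A minimal userspace sketch of that arithmetic, assuming
4 KiB pages and PTRS_PER_PTE == 1024 (4 MiB large pages); illustration
only, not kernel code:

/*
 * Standalone sketch of the rounding in init_alloc_remap(); assumes
 * 4 KiB pages and PTRS_PER_PTE == 1024, i.e. 4 MiB PMD mappings.
 */
#include <stdio.h>

#define PAGE_SIZE        4096UL
#define PTRS_PER_PTE     1024UL
#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)  /* 4 MiB */

/* Round bytes up to whole large pages, return the size in 4 KiB pages. */
static unsigned long remap_size_pages(unsigned long bytes)
{
        unsigned long large = (bytes + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
        return large * PTRS_PER_PTE;
}

int main(void)
{
        /* 5 MiB rounds up to two 4 MiB pages, i.e. 2048 small pages. */
        printf("%lu\n", remap_size_pages(5UL << 20));
        return 0;
}

With the memmap plus the page-aligned pg_data_t coming to, say, 5 MiB,
this rounds up to two large pages, so the function returns 2048 and the
caller adds that to reserve_pages as this node's share of the KVA area.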