Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
diff --git a/mm/sparse.c b/mm/sparse.c
index 6a4bf9160e855ae1e2d61fefb4922918f710bb24..fac95f2888f2d1fbe923fd83ff7d43786ac89c58 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -65,21 +65,18 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
 
        if (slab_is_available()) {
                if (node_state(nid, N_HIGH_MEMORY))
-                       section = kmalloc_node(array_size, GFP_KERNEL, nid);
+                       section = kzalloc_node(array_size, GFP_KERNEL, nid);
                else
-                       section = kmalloc(array_size, GFP_KERNEL);
-       } else
+                       section = kzalloc(array_size, GFP_KERNEL);
+       } else {
                section = alloc_bootmem_node(NODE_DATA(nid), array_size);
-
-       if (section)
-               memset(section, 0, array_size);
+       }
 
        return section;
 }
 
 static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 {
-       static DEFINE_SPINLOCK(index_init_lock);
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
        int ret = 0;
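
Note on the hunk above: it folds the removed memset() into the allocation itself, since kzalloc()/kzalloc_node() are simply kmalloc()/kmalloc_node() with __GFP_ZERO implied. A minimal before/after sketch, reusing the identifiers from the hunk (illustration only, not part of the patch):

        /* Before: allocate, then zero by hand, and only if the allocation succeeded. */
        section = kmalloc_node(array_size, GFP_KERNEL, nid);
        if (section)
                memset(section, 0, array_size);

        /* After: the allocator hands back memory that is already zero-filled. */
        section = kzalloc_node(array_size, GFP_KERNEL, nid);
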
@@ -90,20 +87,9 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;
-       /*
-        * This lock keeps two different sections from
-        * reallocating for the same index
-        */
-       spin_lock(&index_init_lock);
-
-       if (mem_section[root]) {
-               ret = -EEXIST;
-               goto out;
-       }
 
        mem_section[root] = section;
-out:
-       spin_unlock(&index_init_lock);
+
        return ret;
 }
 #else /* !SPARSEMEM_EXTREME */
@@ -132,6 +118,8 @@ int __section_nr(struct mem_section* ms)
                     break;
        }
 
+       VM_BUG_ON(root_nr == NR_SECTION_ROOTS);
+
        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
 }
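
Note on the added VM_BUG_ON(): it expands to BUG_ON() only when CONFIG_DEBUG_VM is set, so it costs nothing in production builds. It catches the case where the search above falls off the end of the root table, leaving root_nr == NR_SECTION_ROOTS and root pointing at the last entry examined, so the computed section number would be garbage. A self-contained sketch of the same bounded-search-plus-debug-assert pattern (plain C with a hypothetical bucket_of() helper, not kernel code):

        #include <assert.h>
        #include <stddef.h>

        /* Return the index of the bucket whose range contains ptr; in debug
         * builds, assert that the loop did not run off the end of the table. */
        static size_t bucket_of(char *table[], size_t nr_buckets,
                                size_t bucket_size, const char *ptr)
        {
                size_t i;

                for (i = 0; i < nr_buckets; i++) {
                        if (table[i] && ptr >= table[i] &&
                            ptr < table[i] + bucket_size)
                                break;
                }

                assert(i < nr_buckets); /* analogous to VM_BUG_ON(i == nr_buckets) */
                return i;
        }
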
 
@@ -275,8 +263,9 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
 {
-       pg_data_t *host_pgdat;
-       unsigned long goal;
+       unsigned long goal, limit;
+       unsigned long *p;
+       int nid;
        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
@@ -287,10 +276,17 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
-       goal = __pa(pgdat) & PAGE_SECTION_MASK;
-       host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
-       return __alloc_bootmem_node_nopanic(host_pgdat, size,
-                                           SMP_CACHE_BYTES, goal);
+       goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+       limit = goal + (1UL << PA_SECTION_SHIFT);
+       nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
+again:
+       p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
+                                         SMP_CACHE_BYTES, goal, limit);
+       if (!p && limit) {
+               limit = 0;
+               goto again;
+       }
+       return p;
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
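
Note on the rewritten allocator above: it first confines the bootmem request to the memory section that holds the pgdat (the goal..limit window) and retries with limit = 0, i.e. no upper bound, only if that constrained attempt fails. The same lines again with commentary added (annotation only, identifiers exactly as in the hunk):

        /* Physical start of the memory section containing the pgdat ... */
        goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        /* ... and the end of that section: the preferred allocation window. */
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
        p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
                                          SMP_CACHE_BYTES, goal, limit);
        if (!p && limit) {
                limit = 0;      /* nothing fit inside the section: retry unconstrained */
                goto again;
        }
        return p;
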
@@ -485,6 +481,9 @@ void __init sparse_init(void)
        struct page **map_map;
 #endif
 
+       /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
+       set_pageblock_order();
+
        /*
         * map is using big page (aka 2M in x86 64 bit)
         * usemap is less one page (aka 24 bytes)