x86: kill bad_ppro
authorYinghai Lu <yhlu.kernel@gmail.com>
Mon, 16 Jun 2008 23:11:08 +0000 (16:11 -0700)
committerIngo Molnar <mingo@elte.hu>
Tue, 8 Jul 2008 08:38:19 +0000 (10:38 +0200)
Apply the Pentium Pro RAM-bug workaround only on affected CPUs, so that all other CPUs are not punished when initializing highmem.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/setup_32.c
arch/x86/mm/discontig_32.c
arch/x86/mm/init_32.c
include/asm-x86/highmem.h
include/asm-x86/numa_32.h

index f3ddba5ed9a7bd8af1add519c1165cf6ddd4c627..9692aeb8ecaecb42a873925a3e6c23d3643dd132 100644 (file)
@@ -68,6 +68,7 @@
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/efi.h>
+#include <asm/bugs.h>
 
 /* This value is set up by the early boot code to point to the value
    immediately after the boot time page tables.  It contains a *physical*
@@ -764,6 +765,14 @@ void __init setup_arch(char **cmdline_p)
        if (efi_enabled)
                efi_init();
 
+       if (ppro_with_ram_bug()) {
+               e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
+                                 E820_RESERVED);
+               sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+               printk(KERN_INFO "fixed physical RAM map:\n");
+               e820_print_map("bad_ppro");
+       }
+
        e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
index 7c4d0255f8d8b1d1fcc94a510e5b1d0b2da46af4..6216e43b6e95c71e1cabaa11b3c591998563390a 100644 (file)
@@ -427,7 +427,7 @@ void __init zone_sizes_init(void)
        return;
 }
 
-void __init set_highmem_pages_init(int bad_ppro) 
+void __init set_highmem_pages_init(void)
 {
 #ifdef CONFIG_HIGHMEM
        struct zone *zone;
@@ -447,7 +447,7 @@ void __init set_highmem_pages_init(int bad_ppro)
                                zone->name, nid, zone_start_pfn, zone_end_pfn);
 
                add_highpages_with_active_regions(nid, zone_start_pfn,
-                                zone_end_pfn, bad_ppro);
+                                zone_end_pfn);
        }
        totalram_pages += totalhigh_pages;
 #endif
index ba07a489230e7fe1a02423b58423b0fcc1b9a24a..fb5694d788bfda1274b6647b8824d76f3cce9cff 100644 (file)
@@ -220,13 +220,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
        }
 }
 
-static inline int page_kills_ppro(unsigned long pagenr)
-{
-       if (pagenr >= 0x70000 && pagenr <= 0x7003F)
-               return 1;
-       return 0;
-}
-
 /*
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
@@ -287,22 +280,17 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
        pkmap_page_table = pte;
 }
 
-static void __init
-add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
+static void __init add_one_highpage_init(struct page *page, int pfn)
 {
-       if (!(bad_ppro && page_kills_ppro(pfn))) {
-               ClearPageReserved(page);
-               init_page_count(page);
-               __free_page(page);
-               totalhigh_pages++;
-       } else
-               SetPageReserved(page);
+       ClearPageReserved(page);
+       init_page_count(page);
+       __free_page(page);
+       totalhigh_pages++;
 }
 
 struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
-       int bad_ppro;
 };
 
 static void __init add_highpages_work_fn(unsigned long start_pfn,
@@ -312,10 +300,8 @@ static void __init add_highpages_work_fn(unsigned long start_pfn,
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;
-       int bad_ppro;
 
        data = (struct add_highpages_data *)datax;
-       bad_ppro = data->bad_ppro;
 
        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);
@@ -327,29 +313,26 @@ static void __init add_highpages_work_fn(unsigned long start_pfn,
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
-               add_one_highpage_init(page, node_pfn, bad_ppro);
+               add_one_highpage_init(page, node_pfn);
        }
 
 }
 
 void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
-                                             unsigned long end_pfn,
-                                             int bad_ppro)
+                                             unsigned long end_pfn)
 {
        struct add_highpages_data data;
 
        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;
-       data.bad_ppro = bad_ppro;
 
        work_with_active_regions(nid, add_highpages_work_fn, &data);
 }
 
 #ifndef CONFIG_NUMA
-static void __init set_highmem_pages_init(int bad_ppro)
+static void __init set_highmem_pages_init(void)
 {
-       add_highpages_with_active_regions(0, highstart_pfn, highend_pfn,
-                                               bad_ppro);
+       add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
 
        totalram_pages += totalhigh_pages;
 }
@@ -358,7 +341,7 @@ static void __init set_highmem_pages_init(int bad_ppro)
 #else
 # define kmap_init()                           do { } while (0)
 # define permanent_kmaps_init(pgd_base)                do { } while (0)
-# define set_highmem_pages_init(bad_ppro)      do { } while (0)
+# define set_highmem_pages_init()      do { } while (0)
 #endif /* CONFIG_HIGHMEM */
 
 pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
@@ -605,13 +588,11 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
 void __init mem_init(void)
 {
        int codesize, reservedpages, datasize, initsize;
-       int tmp, bad_ppro;
+       int tmp;
 
 #ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
 #endif
-       bad_ppro = ppro_with_ram_bug();
-
 #ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
@@ -634,7 +615,7 @@ void __init mem_init(void)
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;
 
-       set_highmem_pages_init(bad_ppro);
+       set_highmem_pages_init();
 
        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
index 85c4fea41ff690fbda48df006130ede5a1b06374..4514b16cc7236b447444b30caf26c1a75196f498 100644 (file)
@@ -75,7 +75,7 @@ struct page *kmap_atomic_to_page(void *ptr);
 #define flush_cache_kmaps()    do { } while (0)
 
 extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
-                                       unsigned long end_pfn, int bad_ppro);
+                                       unsigned long end_pfn);
 
 #endif /* __KERNEL__ */
 
index 03d0f7a9bf0249b9e649605625530ccac9d6f836..a02674f64869e4f60343bacef573dfd4d5cfcff6 100644 (file)
@@ -5,7 +5,7 @@ extern int pxm_to_nid(int pxm);
 
 #ifdef CONFIG_NUMA
 extern void __init remap_numa_kva(void);
-extern void set_highmem_pages_init(int);
+extern void set_highmem_pages_init(void);
 #else
 static inline void remap_numa_kva(void)
 {
This page took 0.033429 seconds and 5 git commands to generate.