#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif
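
/* cpu_number: each CPU's own index; x86's raw_smp_processor_id() reads it */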
/*
 * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
 * voyager wants cpu_number too.
 */
#ifdef CONFIG_SMP
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#endif
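
/*
 * CPU/APIC bookkeeping, filled in while the MP table or ACPI MADT is
 * parsed, long before the per-cpu areas below are set up.
 */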
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
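
/*
 * The EARLY variants pair each per-cpu map with a static boot-time
 * array so it can be read before the per-cpu areas exist.
 * setup_per_cpu_areas() copies the arrays into the real per-cpu
 * storage and then clears the early pointers.
 */
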
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA

#ifdef CONFIG_X86_64
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0] = (unsigned long)__per_cpu_load,
};
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);
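
/*
 * Per-cpu accessors add __per_cpu_offset[cpu] to a variable's address
 * in the reference section (__per_cpu_start..__per_cpu_end).  On
 * 64-bit, CPU0 initially points at the load image (the [0] initializer
 * above) until setup_per_cpu_areas() hands it its own copy.
 */
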
/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size;
	char *ptr;
	int cpu;

	/* Copy section for each CPU (we discard the original) */
	size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
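
	/*
	 * Allocate each possible CPU's area from bootmem, node-locally
	 * where that CPU's NUMA node is online and has memory.
	 */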
	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				cpu, node, __pa(ptr));
		}
#endif
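
		/*
		 * Seed the new area from the initial per-cpu section and
		 * record its offset: per-cpu accessors find cpu's copy of a
		 * variable by adding per_cpu_offset(cpu) to its address.
		 */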
		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		/*
		 * Copy data used in early init routines from the initial arrays to the
		 * per cpu data areas.  These arrays then become expendable and the
		 * *_early_ptr's are zeroed indicating that the static arrays are gone.
		 */
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
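		/*
		 * 64-bit runs hardirqs on a separate per-cpu IRQ stack;
		 * publish a pointer 64 bytes below its top.
		 */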
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
		/*
		 * Up to this point, CPU0 has been using .data.init
		 * area.  Reload %gs offset for CPU0.
		 */
		if (cpu == boot_cpu_id)
			load_gs_base(cpu);
#endif

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */