x86: Unify node_to_cpumask_map handling between 32 and 64bit
arch/x86/mm/numa.c
/* Common code for 32 and 64-bit NUMA */
#include <linux/topology.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <asm/numa.h>
#include <asm/acpi.h>

int __initdata numa_off;

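/*
 * Early "numa=" kernel parameter: "numa=off" disables NUMA entirely,
 * "numa=fake=<config>" enables NUMA emulation (CONFIG_NUMA_EMU) and
 * "numa=noacpi" ignores the ACPI SRAT (CONFIG_ACPI_NUMA).
 */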
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
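/*
 * Every entry starts out as NUMA_NO_NODE (the "[first ... last]"
 * range initializer is a GCC extension) until the platform's NUMA
 * information, e.g. the ACPI SRAT, fills in real apicid -> node
 * mappings.
 */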
s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

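/*
 * Which CPUs are on each node; the masks are allocated in
 * setup_node_to_cpumask_map() and consumed through cpumask_of_node().
 */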
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
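/*
 * 32-bit has historically defaulted to node 0, while 64-bit starts
 * out as NUMA_NO_NODE until the real mapping is known; the 32/64-bit
 * unification keeps that difference.
 */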
#ifdef CONFIG_X86_32
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, 0);
#else
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
#endif
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

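/*
 * Record the cpu -> node mapping.  Before the per-cpu areas exist
 * this writes to the early boot-time array; afterwards it updates the
 * real per-cpu variable and, for a valid node, the generic numa_node
 * as well.
 */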
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on the number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* set up nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}
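	/*
	 * Example: with node_possible_map = { 0, 2 } the loop above
	 * leaves num == 2, so nr_node_ids becomes 3; the map is sized
	 * by the highest possible node id plus one, not the node count.
	 */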

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

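/*
 * Without CONFIG_DEBUG_PER_CPU_MAPS these are plain fast paths with
 * no sanity checking; the NUMA emulation code provides its own
 * versions.
 */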
# ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

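/*
 * Debug variant of cpu_to_node(): warn (with a stack trace) when the
 * mapping is queried before the per-cpu areas have been set up.
 */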
int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

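/*
 * Look up and log the cpumask for @cpu's node and return it so the
 * caller can set or clear the cpu's bit; global (rather than static)
 * because the NUMA emulation code uses it as well.
 */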
struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	struct cpumask *mask;
	char buf[64];

	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return NULL;
	}

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, buf);
	return mask;
}

# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	struct cpumask *mask;

	mask = debug_cpumask_set_cpu(cpu, enable);
	if (!mask)
		return;

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
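/*
 * On error this degrades gracefully: an out-of-range node yields
 * cpu_none_mask, while a missing map (i.e. a call before
 * setup_node_to_cpumask_map()) yields cpu_online_mask, so callers
 * always get a usable mask back.
 */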
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node >= nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */