/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/node.h>
#include <linux/cpu.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/pci.h>
#include <linux/swiotlb.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/timex.h>
#include <linux/hugetlb.h>
#include <linux/start_kernel.h>
#include <linux/screen_info.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>

/* <linux/smp.h> doesn't provide this definition. */
#ifndef CONFIG_SMP
#define setup_max_cpus 1
#endif

static inline int ABS(int x) { return x >= 0 ? x : -x; }

/* Chip information */
char chip_model[64] __write_once;

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

/* Information on the NUMA nodes that we compute early */
unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];

static unsigned long __initdata node_percpu[MAX_NUMNODES];

/*
 * per-CPU stack and boot info.
 */
DEFINE_PER_CPU(unsigned long, boot_sp) =
	(unsigned long)init_stack + THREAD_SIZE;

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
#else
/*
 * The variable must be __initdata since it references __init code.
 * With CONFIG_SMP it is per-cpu data, which is exempt from validation.
 */
unsigned long __initdata boot_pc = (unsigned long)start_kernel;
#endif

#ifdef CONFIG_HIGHMEM
/* Page frame index of end of lowmem on each controller. */
unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];

/* Number of pages that can be mapped into lowmem. */
static unsigned long __initdata mappable_physpages;
#endif

/* Data on which physical memory controller corresponds to which NUMA node */
int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };

#ifdef CONFIG_HIGHMEM
/* Map information from VAs to PAs */
unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(pbase_map);

/* Map information from PAs to VAs */
void *vbase_map[NR_PA_HIGHBIT_VALUES]
	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(vbase_map);
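
/*
 * Sketch of how these arrays are consumed (a rough outline, assuming the
 * HPAGE_SHIFT-granular lowmem mappings built by setup_pa_va_mapping()
 * below): a lowmem VA translates to a PA approximately as
 *
 *	pa = ((phys_addr_t)pbase_map[va >> HPAGE_SHIFT] << PAGE_SHIFT)
 *	     + (va & (HPAGE_SIZE - 1));
 *
 * while vbase_map[] inverts the mapping per high-PA-bits bucket.
 */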
#endif

/* Node number as a function of the high PA bits */
int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
EXPORT_SYMBOL(highbits_to_node);

static unsigned int __initdata maxmem_pfn = -1U;
static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES-1] = -1U
};
static nodemask_t __initdata isolnodes;

#if defined(CONFIG_PCI) && !defined(__tilegx__)
enum { DEFAULT_PCI_RESERVE_MB = 64 };
static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
unsigned long __initdata pci_reserve_start_pfn = -1U;
unsigned long __initdata pci_reserve_end_pfn = -1U;
#endif

static int __init setup_maxmem(char *str)
{
	unsigned long long maxmem;
	if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
		return -EINVAL;

	maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used to no more than %dMB\n",
		maxmem_pfn >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxmem", setup_maxmem);

static int __init setup_maxnodemem(char *str)
{
	char *endp;
	unsigned long long maxnodemem;
	long node;

	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
	if (node >= MAX_NUMNODES || *endp != ':')
		return -EINVAL;

	maxnodemem = memparse(endp+1, NULL);
	maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
		node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxnodemem", setup_maxnodemem);

static int __init setup_isolnodes(char *str)
{
	char buf[MAX_NUMNODES * 5];
	if (str == NULL || nodelist_parse(str, isolnodes) != 0)
		return -EINVAL;

	nodelist_scnprintf(buf, sizeof(buf), isolnodes);
	pr_info("Set isolnodes value to '%s'\n", buf);
	return 0;
}
early_param("isolnodes", setup_isolnodes);

#if defined(CONFIG_PCI) && !defined(__tilegx__)
static int __init setup_pci_reserve(char *str)
{
	unsigned long mb;

	if (str == NULL || strict_strtoul(str, 0, &mb) != 0 ||
	    mb > 3 * 1024)
		return -EINVAL;

	pci_reserve_mb = mb;
	pr_info("Reserving %dMB for PCIE root complex mappings\n",
		pci_reserve_mb);
	return 0;
}
early_param("pci_reserve", setup_pci_reserve);
#endif

#ifndef __tilegx__
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
 * This can be used to increase (or decrease) the vmalloc area.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;

	/* See validate_va() for more on this test. */
	if ((long)_VMALLOC_START >= 0)
		early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
			    VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);

	return 0;
}
early_param("vmalloc", parse_vmalloc);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * Determine for each controller where its lowmem is mapped and how much of
 * it is mapped there.  On controller zero, the first few megabytes are
 * already mapped in as code at MEM_SV_INTRPT, so in principle we could
 * start our data mappings higher up, but for now we don't bother, to avoid
 * additional confusion.
 *
 * One question is whether, on systems with more than 768 MB and
 * controllers of different sizes, to map in a proportionate amount of
 * each one, or to try to map the same amount from each controller.
 * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
 * respectively, do we map 256MB from each, or do we map 128 MB, 512
 * MB, and 128 MB respectively?)  For now we use a proportionate
 * solution like the latter.
 *
 * The VA/PA mapping demands that we align our decisions at 16 MB
 * boundaries so that we can rapidly convert VA to PA.
 */
static void *__init setup_pa_va_mapping(void)
{
	unsigned long curr_pages = 0;
	unsigned long vaddr = PAGE_OFFSET;
	nodemask_t highonlynodes = isolnodes;
	int i, j;

	memset(pbase_map, -1, sizeof(pbase_map));
	memset(vbase_map, -1, sizeof(vbase_map));

	/* Node zero cannot be isolated for LOWMEM purposes. */
	node_clear(0, highonlynodes);

	/* Count up the number of pages on non-highonlynodes controllers. */
	mappable_physpages = 0;
	for_each_online_node(i) {
		if (!node_isset(i, highonlynodes))
			mappable_physpages +=
				node_end_pfn[i] - node_start_pfn[i];
	}

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
		unsigned long size = end - start;
		unsigned long vaddr_end;

		if (node_isset(i, highonlynodes)) {
			/* Mark this controller as having no lowmem. */
			node_lowmem_end_pfn[i] = start;
			continue;
		}

		curr_pages += size;
		if (mappable_physpages > MAXMEM_PFN) {
			vaddr_end = PAGE_OFFSET +
				(((u64)curr_pages * MAXMEM_PFN /
				  mappable_physpages)
				 << PAGE_SHIFT);
		} else {
			vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
		}
		for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
			unsigned long this_pfn =
				start + (j << HUGETLB_PAGE_ORDER);
			pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
			if (vbase_map[__pfn_to_highbits(this_pfn)] ==
			    (void *)-1)
				vbase_map[__pfn_to_highbits(this_pfn)] =
					(void *)(vaddr & HPAGE_MASK);
		}
		node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
		BUG_ON(node_lowmem_end_pfn[i] > end);
	}

	/* Return highest address of any mapped memory. */
	return (void *)vaddr;
}
#endif /* CONFIG_HIGHMEM */

/*
 * Register our most important memory mappings with the debug stub.
 *
 * This is up to 4 mappings for lowmem, one mapping per memory
 * controller, plus one for our text segment.
 */
static void __cpuinit store_permanent_mappings(void)
{
	int i;

	for_each_online_node(i) {
		HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
		HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
#else
		HV_PhysAddr high_mapped_pa = node_end_pfn[i];
#endif

		unsigned long pages = high_mapped_pa - node_start_pfn[i];
		HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
		hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
	}

	hv_store_mapping((HV_VirtAddr)_stext,
			 (uint32_t)(_einittext - _stext), 0);
}

/*
 * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
 * and node_online_map, doing suitable sanity-checking.
 * Also set min_low_pfn, max_low_pfn, and max_pfn.
 */
static void __init setup_memory(void)
{
	int i, j;
	int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
#ifdef CONFIG_HIGHMEM
	long highmem_pages;
#endif
#ifndef __tilegx__
	int cap;
#endif
#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
	long lowmem_pages;
#endif

	/* We are using a char to hold the cpu_2_node[] mapping */
	BUILD_BUG_ON(MAX_NUMNODES > 127);

	/* Discover the ranges of memory available to us */
	for (i = 0; ; ++i) {
		unsigned long start, size, end, highbits;
		HV_PhysAddrRange range = hv_inquire_physical(i);
		if (range.size == 0)
			break;
#ifdef CONFIG_FLATMEM
		if (i > 0) {
			pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
			       range.size, range.start + range.size);
			continue;
		}
#endif
#ifndef __tilegx__
		if ((unsigned long)range.start) {
			pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
		if ((range.start & (HPAGE_SIZE-1)) != 0 ||
		    (range.size & (HPAGE_SIZE-1)) != 0) {
			unsigned long long start_pa = range.start;
			unsigned long long orig_size = range.size;
			range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
			range.size -= (range.start - start_pa);
			range.size &= HPAGE_MASK;
			pr_err("Range not hugepage-aligned: %#llx..%#llx:"
			       " now %#llx-%#llx\n",
			       start_pa, start_pa + orig_size,
			       range.start, range.start + range.size);
		}
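		/*
		 * Worked example (hypothetical numbers): with 16MB huge
		 * pages, a range 0x00300000..0x40200000 is trimmed to
		 * 0x01000000..0x40000000 so both endpoints sit on
		 * HPAGE_SIZE boundaries before the range is adopted.
		 */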
		highbits = __pa_to_highbits(range.start);
		if (highbits >= NR_PA_HIGHBIT_VALUES) {
			pr_err("PA high bits too high: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		if (highbits_seen[highbits]) {
			pr_err("Range overlaps in high bits: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		highbits_seen[highbits] = 1;
		if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
			int max_size = maxnodemem_pfn[i];
			if (max_size > 0) {
				pr_err("Maxnodemem reduced node %d to"
				       " %d pages\n", i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxnodemem disabled node %d\n", i);
				continue;
			}
		}
		if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) {
			int max_size = maxmem_pfn - num_physpages;
			if (max_size > 0) {
				pr_err("Maxmem reduced node %d to %d pages\n",
				       i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxmem disabled node %d\n", i);
				continue;
			}
		}
		if (i >= MAX_NUMNODES) {
			pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
			       i, range.size, range.size + range.start);
			continue;
		}

		start = range.start >> PAGE_SHIFT;
		size = range.size >> PAGE_SHIFT;
		end = start + size;

#ifndef __tilegx__
		if (((HV_PhysAddr)end << PAGE_SHIFT) !=
		    (range.start + range.size)) {
			pr_err("PAs too high to represent: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
#if defined(CONFIG_PCI) && !defined(__tilegx__)
		/*
		 * Blocks that overlap the pci reserved region must
		 * have enough space to hold the maximum percpu data
		 * region at the top of the range.  If there isn't
		 * enough space above the reserved region, just
		 * truncate the node.
		 */
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			unsigned int per_cpu_size =
				__per_cpu_end - __per_cpu_start;
			unsigned int percpu_pages =
				NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
			if (end < pci_reserve_end_pfn + percpu_pages) {
				end = pci_reserve_start_pfn;
				pr_err("PCI mapping region reduced node %d to"
				       " %ld pages\n", i, end - start);
			}
		}
#endif

		for (j = __pfn_to_highbits(start);
		     j <= __pfn_to_highbits(end - 1); j++)
			highbits_to_node[j] = i;

		node_start_pfn[i] = start;
		node_end_pfn[i] = end;
		node_controller[i] = range.controller;
		num_physpages += size;
		max_pfn = end;

		/* Mark node as online */
		node_set(i, node_online_map);
		node_set(i, node_possible_map);
	}

#ifndef __tilegx__
	/*
	 * For 4KB pages, mem_map "struct page" data is 1% of the size
	 * of the physical memory, so can be quite big (640 MB for
	 * four 16G zones).  These structures must be mapped in
	 * lowmem, and since we currently cap out at about 768 MB,
	 * it's impractical to try to use this much address space.
	 * For now, arbitrarily cap the amount of physical memory
	 * we're willing to use at 8 million pages (32GB of 4KB pages).
	 */
	cap = 8 * 1024 * 1024;	/* 8 million pages */
	if (num_physpages > cap) {
		int num_nodes = num_online_nodes();
		int cap_each = cap / num_nodes;
		unsigned long dropped_pages = 0;
		for (i = 0; i < num_nodes; ++i) {
			int size = node_end_pfn[i] - node_start_pfn[i];
			if (size > cap_each) {
				dropped_pages += (size - cap_each);
				node_end_pfn[i] = node_start_pfn[i] + cap_each;
			}
		}
		num_physpages -= dropped_pages;
		pr_warning("Only using %ldMB memory;"
			   " ignoring %ldMB.\n",
			   num_physpages >> (20 - PAGE_SHIFT),
			   dropped_pages >> (20 - PAGE_SHIFT));
		pr_warning("Consider using a larger page size.\n");
	}
#endif

	/* Heap starts just above the last loaded address. */
	min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);

#ifdef CONFIG_HIGHMEM
	/* Find where we map lowmem from each controller. */
	high_memory = setup_pa_va_mapping();

	/* Set max_low_pfn based on what node 0 can directly address. */
	max_low_pfn = node_lowmem_end_pfn[0];

	lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
		MAXMEM_PFN : mappable_physpages;
	highmem_pages = (long) (num_physpages - lowmem_pages);

	pr_notice("%ldMB HIGHMEM available.\n",
		  pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
	pr_notice("%ldMB LOWMEM available.\n",
		  pages_to_mb(lowmem_pages));
#else
	/* Set max_low_pfn based on what node 0 can directly address. */
	max_low_pfn = node_end_pfn[0];

#ifndef __tilegx__
	if (node_end_pfn[0] > MAXMEM_PFN) {
		pr_warning("Only using %ldMB LOWMEM.\n",
			   MAXMEM>>20);
		pr_warning("Use a HIGHMEM enabled kernel.\n");
		max_low_pfn = MAXMEM_PFN;
		max_pfn = MAXMEM_PFN;
		num_physpages = MAXMEM_PFN;
		node_end_pfn[0] = MAXMEM_PFN;
	} else {
		pr_notice("%ldMB memory available.\n",
			  pages_to_mb(node_end_pfn[0]));
	}
	for (i = 1; i < MAX_NUMNODES; ++i) {
		node_start_pfn[i] = 0;
		node_end_pfn[i] = 0;
	}
	high_memory = __va(node_end_pfn[0]);
#else
	lowmem_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i) {
		int pages = node_end_pfn[i] - node_start_pfn[i];
		lowmem_pages += pages;
		if (pages)
			high_memory = pfn_to_kaddr(node_end_pfn[i]);
	}
	pr_notice("%ldMB memory available.\n",
		  pages_to_mb(lowmem_pages));
#endif
#endif
}

/*
 * On 32-bit machines, we only put bootmem on the low controller,
 * since PAs > 4GB can't be used in bootmem.  In principle one could
 * imagine, e.g., multiple 1 GB controllers all of which could support
 * bootmem, but in practice using controllers this small isn't a
 * particularly interesting scenario, so we just keep it simple and
 * use only the first controller for bootmem on 32-bit machines.
 */
static inline int node_has_bootmem(int nid)
{
#ifdef CONFIG_64BIT
	return 1;
#else
	return nid == 0;
#endif
}

static inline unsigned long alloc_bootmem_pfn(int nid,
					      unsigned long size,
					      unsigned long goal)
{
	void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
					 PAGE_SIZE, goal);
	unsigned long pfn = kaddr_to_pfn(kva);
	BUG_ON(goal && PFN_PHYS(pfn) != goal);
	return pfn;
}

static void __init setup_bootmem_allocator_node(int i)
{
	unsigned long start, end, mapsize, mapstart;

	if (node_has_bootmem(i)) {
		NODE_DATA(i)->bdata = &bootmem_node_data[i];
	} else {
		/* Share controller zero's bdata for now. */
		NODE_DATA(i)->bdata = &bootmem_node_data[0];
		return;
	}

	/* Skip up to after the bss in node 0. */
	start = (i == 0) ? min_low_pfn : node_start_pfn[i];

	/* Only lowmem, if we're a HIGHMEM build. */
#ifdef CONFIG_HIGHMEM
	end = node_lowmem_end_pfn[i];
#else
	end = node_end_pfn[i];
#endif

	/* No memory here. */
	if (end == start)
		return;

	/* Figure out where the bootmem bitmap is located. */
	mapsize = bootmem_bootmap_pages(end - start);
	if (i == 0) {
		/* Use some space right before the heap on node 0. */
		mapstart = start;
		start += mapsize;
	} else {
		/* Allocate bitmap on node 0 to avoid page table issues. */
		mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
	}

	/* Initialize a node. */
	init_bootmem_node(NODE_DATA(i), mapstart, start, end);

	/* Free all the space back into the allocator. */
	free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));

#if defined(CONFIG_PCI) && !defined(__tilegx__)
	/*
	 * Throw away any memory aliased by the PCI region.
	 */
	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
		reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
				PFN_PHYS(pci_reserve_end_pfn -
					 pci_reserve_start_pfn),
				BOOTMEM_EXCLUSIVE);
#endif
}

static void __init setup_bootmem_allocator(void)
{
	int i;
	for (i = 0; i < MAX_NUMNODES; ++i)
		setup_bootmem_allocator_node(i);

#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0);
#endif
}

void *__init alloc_remap(int nid, unsigned long size)
{
	int pages = node_end_pfn[nid] - node_start_pfn[nid];
	void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
	BUG_ON(size != pages * sizeof(struct page));
	memset(map, 0, size);
	return map;
}

static int __init percpu_size(void)
{
	int size = __per_cpu_end - __per_cpu_start;
	size += PERCPU_MODULE_RESERVE;
	size += PERCPU_DYNAMIC_EARLY_SIZE;
	if (size < PCPU_MIN_UNIT_SIZE)
		size = PCPU_MIN_UNIT_SIZE;
	size = roundup(size, PAGE_SIZE);

	/* In several places we assume the per-cpu data fits on a huge page. */
	BUG_ON(kdata_huge && size > HPAGE_SIZE);
	return size;
}
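
/*
 * Rough sizing sketch (illustrative numbers only): if the static
 * per-cpu section were 256KB, adding PERCPU_MODULE_RESERVE and
 * PERCPU_DYNAMIC_EARLY_SIZE and rounding up to PAGE_SIZE gives the
 * per-cpu footprint that zone_sizes_init() below charges to each
 * node for every cpu it hosts.
 */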

static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
	int size = percpu_size();
	int num_cpus = smp_height * smp_width;
	const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT));

	int i;

	for (i = 0; i < num_cpus; ++i)
		node_percpu[cpu_to_node(i)] += size;

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
#ifdef CONFIG_HIGHMEM
		unsigned long lowmem_end = node_lowmem_end_pfn[i];
#else
		unsigned long lowmem_end = end;
#endif
		int memmap_size = (end - start) * sizeof(struct page);
		node_free_pfn[i] = start;

		/*
		 * Set aside pages for per-cpu data and the mem_map array.
		 *
		 * Since the per-cpu data requires special homecaching,
		 * if we are in kdata_huge mode, we put it at the end of
		 * the lowmem region.  If we're not in kdata_huge mode,
		 * we take the per-cpu pages from the bottom of the
		 * controller, since that avoids fragmenting a huge page
		 * that users might want.  We always take the memmap
		 * from the bottom of the controller, since with
		 * kdata_huge that lets it be under a huge TLB entry.
		 *
		 * If the user has requested isolnodes for a controller,
		 * though, there'll be no lowmem, so we just alloc_bootmem
		 * the memmap.  There will be no percpu memory either.
		 */
		if (i != 0 && cpu_isset(i, isolnodes)) {
			node_memmap_pfn[i] =
				alloc_bootmem_pfn(0, memmap_size, 0);
			BUG_ON(node_percpu[i] != 0);
		} else if (node_has_bootmem(start)) {
			unsigned long goal = 0;
			node_memmap_pfn[i] =
				alloc_bootmem_pfn(i, memmap_size, 0);
			if (kdata_huge)
				goal = PFN_PHYS(lowmem_end) - node_percpu[i];
			if (node_percpu[i])
				node_percpu_pfn[i] =
					alloc_bootmem_pfn(i, node_percpu[i],
							  goal);
		} else {
			/* In non-bootmem zones, just reserve some pages. */
			node_memmap_pfn[i] = node_free_pfn[i];
			node_free_pfn[i] += PFN_UP(memmap_size);
			if (!kdata_huge) {
				node_percpu_pfn[i] = node_free_pfn[i];
				node_free_pfn[i] += PFN_UP(node_percpu[i]);
			} else {
				node_percpu_pfn[i] =
					lowmem_end - PFN_UP(node_percpu[i]);
			}
		}

#ifdef CONFIG_HIGHMEM
		if (start > lowmem_end) {
			zones_size[ZONE_NORMAL] = 0;
			zones_size[ZONE_HIGHMEM] = end - start;
		} else {
			zones_size[ZONE_NORMAL] = lowmem_end - start;
			zones_size[ZONE_HIGHMEM] = end - lowmem_end;
		}
#else
		zones_size[ZONE_NORMAL] = end - start;
#endif

		if (start < dma_end) {
			zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL],
						   dma_end - start);
			zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA];
		} else {
			zones_size[ZONE_DMA] = 0;
		}
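
		/*
		 * Illustration (hypothetical layout): a node spanning
		 * 3GB..6GB ends up with ZONE_DMA covering 3GB..4GB and
		 * ZONE_NORMAL covering the remainder, since dma_end marks
		 * the 4GB boundary that 32-bit DMA devices can address.
		 */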

		/* Take zone metadata from controller 0 if we're isolnode. */
		if (node_isset(i, isolnodes))
			NODE_DATA(i)->bdata = &bootmem_node_data[0];

		free_area_init_node(i, zones_size, start, NULL);
		printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
		       PFN_UP(node_percpu[i]));

		/* Track the type of memory on each node */
		if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA])
			node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
		if (end != start)
			node_set_state(i, N_HIGH_MEMORY);
#endif

		node_set_online(i);
	}
}

#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
EXPORT_SYMBOL(node_2_cpu_mask);

/* which node each logical CPU is on */
char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(cpu_2_node);

/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
static int __init cpu_to_bound_node(int cpu, struct cpumask *unbound_cpus)
{
	if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
		return -1;
	else
		return cpu_to_node(cpu);
}

/* Return number of immediately-adjacent tiles sharing the same NUMA node. */
static int __init node_neighbors(int node, int cpu,
				 struct cpumask *unbound_cpus)
{
	int neighbors = 0;
	int w = smp_width;
	int h = smp_height;
	int x = cpu % w;
	int y = cpu / w;
	if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
		++neighbors;
	if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
		++neighbors;
	if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
		++neighbors;
	if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
		++neighbors;
	return neighbors;
}

static void __init setup_numa_mapping(void)
{
	int distance[MAX_NUMNODES][NR_CPUS];
	HV_Coord coord;
	int cpu, node, cpus, i, x, y;
	int num_nodes = num_online_nodes();
	struct cpumask unbound_cpus;
	nodemask_t default_nodes;

	cpumask_clear(&unbound_cpus);

	/* Get set of nodes we will use for defaults */
	nodes_andnot(default_nodes, node_online_map, isolnodes);
	if (nodes_empty(default_nodes)) {
		BUG_ON(!node_isset(0, node_online_map));
		pr_err("Forcing NUMA node zero available as a default node\n");
		node_set(0, default_nodes);
	}

	/* Populate the distance[] array */
	memset(distance, -1, sizeof(distance));
	cpu = 0;
	for (coord.y = 0; coord.y < smp_height; ++coord.y) {
		for (coord.x = 0; coord.x < smp_width;
		     ++coord.x, ++cpu) {
			BUG_ON(cpu >= nr_cpu_ids);
			if (!cpu_possible(cpu)) {
				cpu_2_node[cpu] = -1;
				continue;
			}
			for_each_node_mask(node, default_nodes) {
				HV_MemoryControllerInfo info =
					hv_inquire_memory_controller(
						coord, node_controller[node]);
				distance[node][cpu] =
					ABS(info.coord.x) + ABS(info.coord.y);
			}
			cpumask_set_cpu(cpu, &unbound_cpus);
		}
	}
	cpus = cpu;

	/*
	 * Round-robin through the NUMA nodes until all the cpus are
	 * assigned.  We could be more clever here (e.g. create four
	 * sorted linked lists on the same set of cpu nodes, and pull
	 * off them in round-robin sequence, removing from all four
	 * lists each time) but given the relatively small numbers
	 * involved, O(n^2) seems OK for a one-time cost.
	 */
	node = first_node(default_nodes);
	while (!cpumask_empty(&unbound_cpus)) {
		int best_cpu = -1;
		int best_distance = INT_MAX;
		for (cpu = 0; cpu < cpus; ++cpu) {
			if (cpumask_test_cpu(cpu, &unbound_cpus)) {
				/*
				 * Compute metric, which is how much
				 * closer the cpu is to this memory
				 * controller than the others, shifted
				 * up, and then the number of
				 * neighbors already in the node as an
				 * epsilon adjustment to try to keep
				 * the nodes compact.
				 */
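				/*
				 * Worked example (made-up distances): with
				 * two nodes, distance[node][cpu] = 3 and
				 * distance[other][cpu] = 7 give
				 * d = (3*2 - 7) * 8 = -8, further reduced
				 * by each already-assigned neighbor; the
				 * smallest d wins the cpu for this node.
				 */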
				int d = distance[node][cpu] * num_nodes;
				for_each_node_mask(i, default_nodes) {
					if (i != node)
						d -= distance[i][cpu];
				}
				d *= 8;  /* allow space for epsilon */
				d -= node_neighbors(node, cpu, &unbound_cpus);
				if (d < best_distance) {
					best_cpu = cpu;
					best_distance = d;
				}
			}
		}
		BUG_ON(best_cpu < 0);
		cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
		cpu_2_node[best_cpu] = node;
		cpumask_clear_cpu(best_cpu, &unbound_cpus);
		node = next_node(node, default_nodes);
		if (node == MAX_NUMNODES)
			node = first_node(default_nodes);
	}

	/* Print out node assignments and set defaults for disabled cpus */
	cpu = 0;
	for (y = 0; y < smp_height; ++y) {
		printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
		for (x = 0; x < smp_width; ++x, ++cpu) {
			if (cpu_to_node(cpu) < 0) {
				pr_cont(" -");
				cpu_2_node[cpu] = first_node(default_nodes);
			} else {
				pr_cont(" %d", cpu_to_node(cpu));
			}
		}
		pr_cont("\n");
	}
}

static struct cpu cpu_devices[NR_CPUS];

static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for (i = 0; i < smp_height * smp_width; ++i)
		register_cpu(&cpu_devices[i], i);

	return 0;
}

subsys_initcall(topology_init);

#else /* !CONFIG_NUMA */

#define setup_numa_mapping() do { } while (0)

#endif /* CONFIG_NUMA */

/*
 * Initialize hugepage support on this cpu.  We do this on all cores
 * early in boot: before argument parsing for the boot cpu, and after
 * argument parsing but before the init functions run on the secondaries.
 * So the values we set up here in the hypervisor may be overridden on
 * the boot cpu as arguments are parsed.
 */
static __cpuinit void init_super_pages(void)
{
#ifdef CONFIG_HUGETLB_SUPER_PAGES
	int i;
	for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
		hv_set_pte_super_shift(i, huge_shift[i]);
#endif
}

/**
 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
 * @boot: Is this the boot cpu?
 *
 * Called from setup_arch() on the boot cpu, or online_secondary().
 */
void __cpuinit setup_cpu(int boot)
{
	/* The boot cpu sets up its permanent mappings much earlier. */
	if (!boot)
		store_permanent_mappings();

	/* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
	arch_local_irq_unmask(INT_DMATLB_MISS);
	arch_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#if CHIP_HAS_SN_PROC()
	arch_local_irq_unmask(INT_SNITLB_MISS);
#endif
#ifdef __tilegx__
	arch_local_irq_unmask(INT_SINGLE_STEP_K);
#endif

	/*
	 * Allow user access to many generic SPRs, like the cycle
	 * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
	 */
	__insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);

#if CHIP_HAS_SN()
	/* Static network is not restricted. */
	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
#endif
#if CHIP_HAS_SN_PROC()
	__insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
	__insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
#endif

	/*
	 * Set the MPL for interrupt control 0 & 1 to the corresponding
	 * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
	 * SPRs, as well as the interrupt mask.
	 */
	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);

	/* Initialize IRQ support for this cpu. */
	setup_irq_regs();

#ifdef CONFIG_HARDWALL
	/* Reset the network state on this cpu. */
	reset_network_state();
#endif

	init_super_pages();
}

#ifdef CONFIG_BLK_DEV_INITRD

static int __initdata set_initramfs_file;
static char __initdata initramfs_file[128] = "initramfs";

static int __init setup_initramfs_file(char *str)
{
	if (str == NULL)
		return -EINVAL;
	strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
	set_initramfs_file = 1;

	return 0;
}
early_param("initramfs_file", setup_initramfs_file);

/*
 * We look for a file called "initramfs" in the hvfs.  If there is one, we
 * allocate some memory for it and it will be unpacked to the initramfs.
 * If it's compressed, the initrd code will uncompress it first.
 */
static void __init load_hv_initrd(void)
{
	HV_FS_StatInfo stat;
	int fd, rc;
	void *initrd;

	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
	if (fd == HV_ENOENT) {
		if (set_initramfs_file) {
			pr_warning("No such hvfs initramfs file '%s'\n",
				   initramfs_file);
			return;
		} else {
			/* Try old backwards-compatible name. */
			fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
			if (fd == HV_ENOENT)
				return;
		}
	}
	BUG_ON(fd < 0);
	stat = hv_fs_fstat(fd);
	BUG_ON(stat.size < 0);
	if (stat.flags & HV_FS_ISDIR) {
		pr_warning("Ignoring hvfs file '%s': it's a directory.\n",
			   initramfs_file);
		return;
	}
	initrd = alloc_bootmem_pages(stat.size);
	rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
	if (rc != stat.size) {
		pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
		       stat.size, initramfs_file, rc);
		free_initrd_mem((unsigned long) initrd, stat.size);
		return;
	}
	initrd_start = (unsigned long) initrd;
	initrd_end = initrd_start + stat.size;
}

void __init free_initrd_mem(unsigned long begin, unsigned long end)
{
	free_bootmem(__pa(begin), end - begin);
}

#else
static inline void load_hv_initrd(void) {}
#endif /* CONFIG_BLK_DEV_INITRD */

static void __init validate_hv(void)
{
	/*
	 * It may already be too late, but let's check our built-in
	 * configuration against what the hypervisor is providing.
	 */
	unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
	int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
	int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
	HV_ASIDRange asid_range;

#ifndef CONFIG_SMP
	HV_Topology topology = hv_inquire_topology();
	BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
	if (topology.width != 1 || topology.height != 1) {
		pr_warning("Warning: booting UP kernel on %dx%d grid;"
			   " will ignore all but first tile.\n",
			   topology.width, topology.height);
	}
#endif

	if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
		early_panic("Hypervisor glue size %ld is too big!\n",
			    glue_size);
	if (hv_page_size != PAGE_SIZE)
		early_panic("Hypervisor page size %#x != our %#lx\n",
			    hv_page_size, PAGE_SIZE);
	if (hv_hpage_size != HPAGE_SIZE)
		early_panic("Hypervisor huge page size %#x != our %#lx\n",
			    hv_hpage_size, HPAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * Some hypervisor APIs take a pointer to a bitmap array
	 * whose size is at least the number of cpus on the chip.
	 * We use a struct cpumask for this, so it must be big enough.
	 */
	if ((smp_height * smp_width) > nr_cpu_ids)
		early_panic("Hypervisor %d x %d grid too big for Linux"
			    " NR_CPUS %d\n", smp_height, smp_width,
			    nr_cpu_ids);
#endif

	/*
	 * Check that we're using allowed ASIDs, and initialize the
	 * various asid variables to their appropriate initial states.
	 */
	asid_range = hv_inquire_asid(0);
	__get_cpu_var(current_asid) = min_asid = asid_range.start;
	max_asid = asid_range.start + asid_range.size - 1;

	if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
		       sizeof(chip_model)) < 0) {
		pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
		strlcpy(chip_model, "unknown", sizeof(chip_model));
	}
}

static void __init validate_va(void)
{
#ifndef __tilegx__ /* FIXME: GX: probably some validation relevant here */
	/*
	 * Similarly, make sure we're only using allowed VAs.
	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT,
	 * and 0 .. KERNEL_HIGH_VADDR.
	 * In addition, make sure we CAN'T use the end of memory, since
	 * we use the last chunk of each pgd for the pgd_list.
	 */
	int i, user_kernel_ok = 0;
	unsigned long max_va = 0;
	unsigned long list_va =
		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);

	for (i = 0; ; ++i) {
		HV_VirtAddrRange range = hv_inquire_virtual(i);
		if (range.size == 0)
			break;
		if (range.start <= MEM_USER_INTRPT &&
		    range.start + range.size >= MEM_HV_INTRPT)
			user_kernel_ok = 1;
		if (range.start == 0)
			max_va = range.size;
		BUG_ON(range.start + range.size > list_va);
	}
	if (!user_kernel_ok)
		early_panic("Hypervisor not configured for user/kernel VAs\n");
	if (max_va == 0)
		early_panic("Hypervisor not configured for low VAs\n");
	if (max_va < KERNEL_HIGH_VADDR)
		early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
			    max_va, KERNEL_HIGH_VADDR);

	/* Kernel PCs must have their high bit set; see intvec.S. */
	if ((long)VMALLOC_START >= 0)
		early_panic(
			"Linux VMALLOC region below the 2GB line (%#lx)!\n"
			"Reconfigure the kernel with fewer NR_HUGE_VMAPS\n"
			"or smaller VMALLOC_RESERVE.\n",
			VMALLOC_START);
#endif
}

/*
 * cpu_lotar_map lists all the cpus that are valid for the supervisor
 * to cache data on at a page level, i.e. what cpus can be placed in
 * the LOTAR field of a PTE.  It is equivalent to the set of possible
 * cpus plus any other cpus that are willing to share their cache.
 * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
 */
struct cpumask __write_once cpu_lotar_map;
EXPORT_SYMBOL(cpu_lotar_map);

#if CHIP_HAS_CBOX_HOME_MAP()
/*
 * hash_for_home_map lists all the tiles that hash-for-home data
 * will be cached on.  Note that this may include tiles that are not
 * valid for this supervisor to use otherwise (e.g. if a hypervisor
 * device is being shared between multiple supervisors).
 * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
 */
struct cpumask hash_for_home_map;
EXPORT_SYMBOL(hash_for_home_map);
#endif

/*
 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
 * flush on our behalf.  It is set to cpu_possible_mask OR'ed with
 * hash_for_home_map, and it is what should be passed to
 * hv_flush_remote() to flush all caches.  Note that if there are
 * dedicated hypervisor driver tiles that have authorized use of their
 * cache, those tiles will only appear in cpu_lotar_map, NOT in
 * cpu_cacheable_map, as they are a special case.
 */
struct cpumask __write_once cpu_cacheable_map;
EXPORT_SYMBOL(cpu_cacheable_map);

static __initdata struct cpumask disabled_map;

static int __init disabled_cpus(char *str)
{
	int boot_cpu = smp_processor_id();

	if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
		return -EINVAL;
	if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
		pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
		cpumask_clear_cpu(boot_cpu, &disabled_map);
	}
	return 0;
}

early_param("disabled_cpus", disabled_cpus);

void __init print_disabled_cpus(void)
{
	if (!cpumask_empty(&disabled_map)) {
		char buf[100];
		cpulist_scnprintf(buf, sizeof(buf), &disabled_map);
		pr_info("CPUs not available for Linux: %s\n", buf);
	}
}

static void __init setup_cpu_maps(void)
{
	struct cpumask hv_disabled_map, cpu_possible_init;
	int boot_cpu = smp_processor_id();
	int cpus, i, rc;

	/* Learn which cpus are allowed by the hypervisor. */
	rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
			      (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
			      sizeof(cpu_cacheable_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
	if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
		early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);

	/* Compute the cpus disabled by the hvconfig file. */
	cpumask_complement(&hv_disabled_map, &cpu_possible_init);

	/* Include them with the cpus disabled by "disabled_cpus". */
	cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);

	/*
	 * Disable every cpu after "setup_max_cpus".  But don't mark
	 * as disabled the cpus that are outside of our initial rectangle,
	 * since that turns out to be confusing.
	 */
	cpus = 1; /* this cpu */
	cpumask_set_cpu(boot_cpu, &disabled_map); /* ignore this cpu */
	for (i = 0; cpus < setup_max_cpus; ++i)
		if (!cpumask_test_cpu(i, &disabled_map))
			++cpus;
	for (; i < smp_height * smp_width; ++i)
		cpumask_set_cpu(i, &disabled_map);
	cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
	for (i = smp_height * smp_width; i < NR_CPUS; ++i)
		cpumask_clear_cpu(i, &disabled_map);

	/*
	 * Setup cpu_possible map as every cpu allocated to us, minus
	 * the results of any "disabled_cpus" settings.
	 */
	cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
	init_cpu_possible(&cpu_possible_init);

	/* Learn which cpus are valid for LOTAR caching. */
	rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
			      (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
			      sizeof(cpu_lotar_map));
	if (rc < 0) {
		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
		cpu_lotar_map = *cpu_possible_mask;
	}

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Retrieve set of CPUs used for hash-for-home caching */
	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
			      (HV_VirtAddr) hash_for_home_map.bits,
			      sizeof(hash_for_home_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
#else
	cpu_cacheable_map = *cpu_possible_mask;
#endif
}


static int __init dataplane(char *str)
{
	pr_warning("WARNING: dataplane support disabled in this kernel\n");
	return 0;
}

early_param("dataplane", dataplane);

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

void __init setup_arch(char **cmdline_p)
{
	int len;

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	len = hv_get_command_line((HV_VirtAddr) boot_command_line,
				  COMMAND_LINE_SIZE);
	if (boot_command_line[0])
		pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
			   boot_command_line);
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	char *hv_cmdline;
#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
					  COMMAND_LINE_SIZE);
		if (builtin_len < COMMAND_LINE_SIZE-1)
			boot_command_line[builtin_len++] = ' ';
		hv_cmdline = &boot_command_line[builtin_len];
		len = COMMAND_LINE_SIZE - builtin_len;
	} else
#endif
	{
		hv_cmdline = boot_command_line;
		len = COMMAND_LINE_SIZE;
	}
	len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
	if (len < 0 || len > COMMAND_LINE_SIZE)
		early_panic("hv_get_command_line failed: %d\n", len);
#endif

	*cmdline_p = boot_command_line;

	/* Set disabled_map and setup_max_cpus very early */
	parse_early_param();

	/* Make sure the kernel is compatible with the hypervisor. */
	validate_hv();
	validate_va();

	setup_cpu_maps();

#if defined(CONFIG_PCI) && !defined(__tilegx__)
	/*
	 * Initialize the PCI structures.  This is done before memory
	 * setup so that we know whether or not a pci_reserve region
	 * is necessary.
	 */
	if (tile_pci_init() == 0)
		pci_reserve_mb = 0;

	/* PCI systems reserve a region just below 4GB for mapping iomem. */
	pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
	pci_reserve_start_pfn = pci_reserve_end_pfn -
		(pci_reserve_mb << (20 - PAGE_SHIFT));
#endif
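
	/*
	 * With the default pci_reserve_mb of 64, this carves out the
	 * 64MB directly below the 4GB line (e.g. pfns 0xfc000..0x100000
	 * with 4KB pages) so BARs can be mapped without aliasing RAM;
	 * the exact pfn values depend on PAGE_SHIFT.
	 */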

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	setup_memory();
	store_permanent_mappings();
	setup_bootmem_allocator();

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	paging_init();
	setup_numa_mapping();
	zone_sizes_init();
	set_page_homes();
	setup_cpu(1);
	setup_clock();
	load_hv_initrd();
}


/*
 * Set up per-cpu memory.
 */

unsigned long __per_cpu_offset[NR_CPUS] __write_once;
EXPORT_SYMBOL(__per_cpu_offset);

static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };

/*
 * As the percpu code allocates pages, we return the pages from the
 * end of the node for the specified cpu.
 */
static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	int nid = cpu_to_node(cpu);
	unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];

	BUG_ON(size % PAGE_SIZE != 0);
	pfn_offset[nid] += size / PAGE_SIZE;
	BUG_ON(node_percpu[nid] < size);
	node_percpu[nid] -= size;
	if (percpu_pfn[cpu] == 0)
		percpu_pfn[cpu] = pfn;
	return pfn_to_kaddr(pfn);
}

/*
 * Pages reserved for percpu memory are not freeable, and in any case we are
 * on a short path to panic() in setup_per_cpu_areas() at this point anyway.
 */
static void __init pcpu_fc_free(void *ptr, size_t size)
{
}

/*
 * Set up vmalloc page tables using bootmem for the percpu code.
 */
static void __init pcpu_fc_populate_pte(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(pgd_addr_invalid(addr));
	if (addr < VMALLOC_START || addr >= VMALLOC_END)
		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
		      " try increasing CONFIG_VMALLOC_RESERVE\n",
		      addr, VMALLOC_START, VMALLOC_END);

	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	if (pmd_present(*pmd)) {
		BUG_ON(pmd_huge_page(*pmd));
	} else {
		pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
				      HV_PAGE_TABLE_ALIGN, 0);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}
}

void __init setup_per_cpu_areas(void)
{
	struct page *pg;
	unsigned long delta, pfn, lowmem_va;
	unsigned long size = percpu_size();
	char *ptr;
	int rc, cpu, i;

	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
				   pcpu_fc_free, pcpu_fc_populate_pte);
	if (rc < 0)
		panic("Cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];

		/* finv the copy out of cache so we can change homecache */
		ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
		__finv_buffer(ptr, size);
		pfn = percpu_pfn[cpu];

		/* Rewrite the page tables to cache on that cpu */
		pg = pfn_to_page(pfn);
		for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {

			/* Update the vmalloc mapping and page home. */
			unsigned long addr = (unsigned long)ptr + i;
			pte_t *ptep = virt_to_pte(NULL, addr);
			pte_t pte = *ptep;
			BUG_ON(pfn != pte_pfn(pte));
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
			pte = set_remote_cache_cpu(pte, cpu);
			set_pte_at(&init_mm, addr, ptep, pte);

			/* Update the lowmem mapping for consistency. */
			lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
			ptep = virt_to_pte(NULL, lowmem_va);
			if (pte_huge(*ptep)) {
				printk(KERN_DEBUG "early shatter of huge page"
				       " at %#lx\n", lowmem_va);
				shatter_pmd((pmd_t *)ptep);
				ptep = virt_to_pte(NULL, lowmem_va);
				BUG_ON(pte_huge(*ptep));
			}
			BUG_ON(pfn != pte_pfn(*ptep));
			set_pte_at(&init_mm, lowmem_va, ptep, pte);
		}
	}

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);

	/* Make sure the finv's have completed. */
	mb_incoherent();

	/* Flush the TLB so we reference it properly from here on out. */
	local_flush_tlb_all();
}

static struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

/*
 * On Pro, we reserve all resources above 4GB so that PCI won't try to put
 * mappings above 4GB.
 */
#if defined(CONFIG_PCI) && !defined(__tilegx__)
static struct resource *__init
insert_non_bus_resource(void)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	res->name = "Non-Bus Physical Address Space";
	res->start = (1ULL << 32);
	res->end = -1LL;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}
#endif

static struct resource *__init
insert_ram_resource(u64 start_pfn, u64 end_pfn)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	res->name = "System RAM";
	res->start = start_pfn << PAGE_SHIFT;
	res->end = (end_pfn << PAGE_SHIFT) - 1;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}

/*
 * Request address space for all standard resources
 *
 * If the system includes PCI root complex drivers, we need to create
 * a window just below 4GB where PCI BARs can be mapped.
 */
static int __init request_standard_resources(void)
{
	int i;
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

#if defined(CONFIG_PCI) && !defined(__tilegx__)
	insert_non_bus_resource();
#endif

	for_each_online_node(i) {
		u64 start_pfn = node_start_pfn[i];
		u64 end_pfn = node_end_pfn[i];

#if defined(CONFIG_PCI) && !defined(__tilegx__)
		if (start_pfn <= pci_reserve_start_pfn &&
		    end_pfn > pci_reserve_start_pfn) {
			if (end_pfn > pci_reserve_end_pfn)
				insert_ram_resource(pci_reserve_end_pfn,
						    end_pfn);
			end_pfn = pci_reserve_start_pfn;
		}
#endif
		insert_ram_resource(start_pfn, end_pfn);
	}

	code_resource.start = __pa(_text - CODE_DELTA);
	code_resource.end = __pa(_etext - CODE_DELTA)-1;
	data_resource.start = __pa(_sdata);
	data_resource.end = __pa(_end)-1;

	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);

#ifdef CONFIG_KEXEC
	insert_resource(&iomem_resource, &crashk_res);
#endif

	return 0;
}

subsys_initcall(request_standard_resources);