/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

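/*
 * direct_gbpages selects 1GB pages for the kernel direct mapping. It
 * defaults on when CONFIG_DIRECT_GBPAGES is set and can be forced on
 * or off with the "gbpages"/"nogbpages" boot parameters below;
 * init_gbpages() later clears it if the CPU lacks the feature.
 */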
int direct_gbpages __meminitdata
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static int __init parse_direct_gbpages_off(char *arg)
{
        direct_gbpages = 0;
        return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
        direct_gbpages = 1;
        return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical memory, so we can cache the place of the first one and
 * move around without checking the pgd every time.
 */

void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        struct page *page;
        pg_data_t *pgdat;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        /*
                         * This loop can take a while with 256 GB and
                         * 4k pages so defer the NMI watchdog:
                         */
                        if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
                                touch_nmi_watchdog();

                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;

                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%ld pages of RAM\n", total);
        printk(KERN_INFO "%ld reserved pages\n", reserved);
        printk(KERN_INFO "%ld pages shared\n", shared);
        printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;

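/*
 * Allocate one zeroed, page-aligned page for a kernel page table:
 * from the buddy allocator once it is up (after_bootmem), from the
 * bootmem allocator before that. Panics rather than return an
 * unusable page.
 */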
static __init void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}

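/*
 * Install a single kernel-space mapping of phys at vaddr with the
 * given protection, allocating intermediate pmd/pte pages as needed.
 * The pgd entry must already exist; head.S sets it up for the fixmap
 * range.
 */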
static __init void
set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                                pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _end.  _end is
 * rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;
        pmd_t *last_pmd = pmd + PTRS_PER_PMD;

        for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
                if (!pmd_present(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk(KERN_ERR "Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;

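/*
 * Hand out one zeroed page for building the early page tables. Before
 * bootmem is up, pages come from the [table_start, table_end) window
 * reserved by find_early_table_space() and are mapped only
 * temporarily; callers must release them with unmap_low_page().
 */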
static __meminit void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);

                return adr;
        }

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");

        adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys  = pfn * PAGE_SIZE;
        return adr;
}

static __meminit void unmap_low_page(void *adr)
{
        if (after_bootmem)
                return;

        early_iounmap(adr, PAGE_SIZE);
}

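/*
 * Early 2MB-granular ioremap: find a run of unused pmd slots in the
 * kernel-text mapping (level2_kernel_pgt) long enough to cover the
 * request, including the misalignment of addr within its 2MB page,
 * and point them at the physical range.
 */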
/* Must run before zap_low_mappings */
__meminit void *early_ioremap(unsigned long addr, unsigned long size)
{
        pmd_t *pmd, *last_pmd;
        unsigned long vaddr;
        int i, pmds;

        pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        vaddr = __START_KERNEL_map;
        pmd = level2_kernel_pgt;
        last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;

        for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
                for (i = 0; i < pmds; i++) {
                        if (pmd_present(pmd[i]))
                                goto continue_outer_loop;
                }
                vaddr += addr & ~PMD_MASK;
                addr &= PMD_MASK;

                for (i = 0; i < pmds; i++, addr += PMD_SIZE)
                        set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
                __flush_tlb_all();

                return (void *)vaddr;
continue_outer_loop:
                ;
        }
        printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);

        return NULL;
}

/*
 * To avoid virtual aliases later:
 */
__meminit void early_iounmap(void *addr, unsigned long size)
{
        unsigned long vaddr;
        pmd_t *pmd;
        int i, pmds;

        vaddr = (unsigned long)addr;
        pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        pmd = level2_kernel_pgt + pmd_index(vaddr);

        for (i = 0; i < pmds; i++)
                pmd_clear(pmd + i);

        __flush_tlb_all();
}

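/*
 * Fill a pmd page with 2MB mappings for [address, end), starting at
 * the slot that covers address. Already-populated entries are left
 * untouched; during boot the remaining slots are cleared when the
 * range ends early. Returns the address at which mapping stopped.
 */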
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                pmd_t *pmd = pmd_page + pmd_index(address);

                if (address >= end) {
                        if (!after_bootmem) {
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        }
                        break;
                }

                if (pmd_val(*pmd))
                        continue;

                set_pte((pte_t *)pmd,
                        pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
        }
        return address;
}

static unsigned long __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long last_map_addr;

        spin_lock(&init_mm.page_table_lock);
        last_map_addr = phys_pmd_init(pmd, address, end);
        spin_unlock(&init_mm.page_table_lock);
        __flush_tlb_all();
        return last_map_addr;
}

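/*
 * Fill a pud page for [addr, end). With direct_gbpages each slot
 * becomes one 1GB mapping; otherwise a pmd page is allocated, or an
 * existing one updated, and filled with 2MB mappings. Returns the
 * last mapped address as a pfn.
 */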
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
        unsigned long last_map_addr = end;
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;

                if (addr >= end)
                        break;

                if (!after_bootmem &&
                                !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        if (!pud_large(*pud))
                                last_map_addr = phys_pmd_update(pud, addr, end);
                        continue;
                }

                if (direct_gbpages) {
                        set_pte((pte_t *)pud,
                                pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
                        last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                        continue;
                }

                pmd = alloc_low_page(&pmd_phys);

                spin_lock(&init_mm.page_table_lock);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                last_map_addr = phys_pmd_init(pmd, addr, end);
                spin_unlock(&init_mm.page_table_lock);

                unmap_low_page(pmd);
        }
        __flush_tlb_all();

        return last_map_addr >> PAGE_SHIFT;
}

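/*
 * Reserve room for the direct-mapping page tables. Each pud or pmd
 * entry is 8 bytes, so mapping e.g. 64GB needs 64 pud entries (one
 * page, after rounding) plus, without gbpages, 32768 pmd entries
 * (256KB). The area is carved out of e820 RAM starting at 0x8000.
 */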
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
        if (!direct_gbpages) {
                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
                tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
        }

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;

        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}

static void __init init_gbpages(void)
{
        if (direct_gbpages && cpu_has_gbpages)
                printk(KERN_INFO "Using GB pages for direct mapping\n");
        else
                direct_gbpages = 0;
}

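/*
 * Optional boot-time RAM test, enabled by CONFIG_MEMTEST_BOOTPARAM
 * and the "memtest=N" parameter. Each pass fills the free e820 ranges
 * with one test pattern (all zeroes, all ones, 0x55.. or 0xaa..),
 * reads it back, and reserves failing ranges as "BAD RAM".
 */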
#ifdef CONFIG_MEMTEST_BOOTPARAM

static void __init memtest(unsigned long start_phys, unsigned long size,
                                 unsigned pattern)
{
        unsigned long i;
        unsigned long *start;
        unsigned long start_bad;
        unsigned long last_bad;
        unsigned long val;
        unsigned long start_phys_aligned;
        unsigned long count;
        unsigned long incr;

        switch (pattern) {
        case 0:
                val = 0UL;
                break;
        case 1:
                val = -1UL;
                break;
        case 2:
                val = 0x5555555555555555UL;
                break;
        case 3:
                val = 0xaaaaaaaaaaaaaaaaUL;
                break;
        default:
                return;
        }

        incr = sizeof(unsigned long);
        start_phys_aligned = ALIGN(start_phys, incr);
        count = (size - (start_phys_aligned - start_phys))/incr;
        start = __va(start_phys_aligned);
        start_bad = 0;
        last_bad = 0;

        for (i = 0; i < count; i++)
                start[i] = val;
        for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
                if (*start != val) {
                        if (start_phys_aligned == last_bad + incr) {
                                last_bad += incr;
                        } else {
                                if (start_bad) {
                                        printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
                                                val, start_bad, last_bad + incr);
                                        reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
                                }
                                start_bad = last_bad = start_phys_aligned;
                        }
                }
        }
        if (start_bad) {
                printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
                        val, start_bad, last_bad + incr);
                reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
        }
}

static int memtest_pattern __initdata = CONFIG_MEMTEST_BOOTPARAM_VALUE;

static int __init parse_memtest(char *arg)
{
        if (arg)
                memtest_pattern = simple_strtoul(arg, NULL, 0);
        return 0;
}

early_param("memtest", parse_memtest);

static void __init early_memtest(unsigned long start, unsigned long end)
{
        unsigned long t_start, t_size;
        unsigned pattern;

        if (!memtest_pattern)
                return;

        printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
        for (pattern = 0; pattern < memtest_pattern; pattern++) {
                t_start = start;
                t_size = 0;
                while (t_start < end) {
                        t_start = find_e820_area_size(t_start, &t_size, 1);

                        /* done ? */
                        if (t_start >= end)
                                break;
                        if (t_start + t_size > end)
                                t_size = end - t_start;

                        printk(KERN_CONT "\n  %016lx - %016lx pattern %d",
                                t_start, t_start + t_size, pattern);

                        memtest(t_start, t_size, pattern);

                        t_start += t_size;
                }
        }
        printk(KERN_CONT "\n");
}
#else
static void __init early_memtest(unsigned long start, unsigned long end)
{
}
#endif

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next, last_map_addr = end;
        unsigned long start_phys = start, end_phys = end;

        printk(KERN_INFO "init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         *
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is done currently before the
         * nodes are discovered.
         */
        if (!after_bootmem) {
                init_gbpages();
                find_early_table_space(end);
        }

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
                unsigned long pud_phys;
                pud_t *pud;

                if (after_bootmem)
                        pud = pud_offset(pgd, start & PGDIR_MASK);
                else
                        pud = alloc_low_page(&pud_phys);

                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next));
                if (!after_bootmem)
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(pud);
        }

        if (!after_bootmem)
                mmu_cr4_features = read_cr4();
        __flush_tlb_all();

        if (!after_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                                table_end << PAGE_SHIFT, "PGTABLE");

        if (!after_bootmem)
                early_memtest(start_phys, end_phys);

        return last_map_addr;
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = end_pfn;

        memory_present(0, 0, end_pfn);
        sparse_init();
        free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Memory hotplug specific functions
 */
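/*
 * online_page() is called for every page of a newly onlined memory
 * section: drop the reservation, give the page an initial refcount
 * and hand it to the buddy allocator, updating the global counters.
 */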
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will
 * never get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        last_mapped_pfn = init_memory_mapping(start, start + size-1);
        if (last_mapped_pfn > max_pfn_mapped)
                max_pfn_mapped = last_mapped_pfn;

        ret = __add_pages(zone, start_pfn, nr_pages);
        WARN_ON(ret);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
                         kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

        pci_iommu_alloc();

        /* clear_bss() already cleared the empty_zero_page */

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages -
                                        absent_pages_in_range(0, end_pfn);
        after_bootmem = 1;

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
                         "%ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                end_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);

        cpa_init();
}

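/*
 * Free the range between begin and end back to the buddy allocator,
 * filling it with POISON_FREE_INITMEM first so stale users fail
 * loudly. With CONFIG_DEBUG_PAGEALLOC the pages are unmapped instead
 * of freed, so any late init-section access faults immediately.
 */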
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr = begin;

        if (addr >= end)
                return;

        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

        for (; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
                        POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        /*
         * The rodata section (but not the kernel text!) should also be
         * not-executable.
         */
        start = ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
        set_memory_nx(start, (end - start) >> PAGE_SHIFT);

        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}

#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
#endif
        unsigned long pfn = phys >> PAGE_SHIFT;

        if (pfn >= end_pfn) {
                /*
                 * This can happen with kdump kernels when accessing
                 * firmware tables:
                 */
                if (pfn < max_pfn_mapped)
                        return;

                printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
                                phys, len);
                return;
        }

        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        reserve_bootmem_node(NODE_DATA(nid), phys, len, BOOTMEM_DEFAULT);
#else
        reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
                set_dma_reserve(dma_reserve);
        }
}

823 | int kern_addr_valid(unsigned long addr) |
824 | { | |
1da177e4 | 825 | unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT; |
14a62c34 TG |
826 | pgd_t *pgd; |
827 | pud_t *pud; | |
828 | pmd_t *pmd; | |
829 | pte_t *pte; | |
1da177e4 LT |
830 | |
831 | if (above != 0 && above != -1UL) | |
14a62c34 TG |
832 | return 0; |
833 | ||
1da177e4 LT |
834 | pgd = pgd_offset_k(addr); |
835 | if (pgd_none(*pgd)) | |
836 | return 0; | |
837 | ||
838 | pud = pud_offset(pgd, addr); | |
839 | if (pud_none(*pud)) | |
14a62c34 | 840 | return 0; |
1da177e4 LT |
841 | |
842 | pmd = pmd_offset(pud, addr); | |
843 | if (pmd_none(*pmd)) | |
844 | return 0; | |
14a62c34 | 845 | |
1da177e4 LT |
846 | if (pmd_large(*pmd)) |
847 | return pfn_valid(pmd_pfn(*pmd)); | |
848 | ||
849 | pte = pte_offset_kernel(pmd, addr); | |
850 | if (pte_none(*pte)) | |
851 | return 0; | |
14a62c34 | 852 | |
1da177e4 LT |
853 | return pfn_valid(pte_pfn(*pte)); |
854 | } | |
855 | ||
/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_START,
        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        if (vma == &gate_vma)
                return "[vsyscall]";
        return NULL;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        for (; addr < end; addr = next) {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        pte_t entry;
                        void *p;

                        p = vmemmap_alloc_block(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                        PAGE_KERNEL_LARGE);
                        set_pmd(pmd, __pmd(pte_val(entry)));

                        printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
                                addr, addr + PMD_SIZE - 1, p, node);
                } else {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
                }
        }
        return 0;
}
#endif