/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/setup.h>

#include "mm_internal.h"

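/*
 * The helpers below build 1:1 (identity) mappings for the physical range
 * [addr, end), allocating intermediate tables through the allocator
 * supplied in struct x86_mapping_info.
 */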
static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (!pmd_present(*pmd))
			set_pmd(pmd, __pmd(addr | pmd_flag));
	}
}

static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info->pmd_flag, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info->pmd_flag, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}

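/*
 * Build an identity mapping at the PGD level. When info->kernel_mapping
 * is set, the entries are installed at the pgd slots of the kernel
 * direct mapping (__PAGE_OFFSET) instead of the identity slots.
 */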
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;
	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
		pud_t *pud;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			pud = pud_offset(pgd, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}

		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}

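/* "gbpages"/"nogbpages" on the command line force 1GB direct-map pages on or off. */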
static int __init parse_direct_gbpages_off(char *arg)
{
	direct_gbpages = 0;
	return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
	direct_gbpages = 1;
	return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 * To control the stack too use noexec=off
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory is added or removed, make sure all the process MMs have
 * suitable PGD entries in the local PGD level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end, int removed)
{
	unsigned long address;

	for (address = start; address <= end; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		struct page *page;

		/*
		 * When it is called after memory hot remove, pgd_none()
		 * returns true. In this case (removed == 1), we must clear
		 * the PGD entries in the local PGD level page.
		 */
		if (pgd_none(*pgd_ref) && !removed)
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			/* the pgt_lock is only used by Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd)
				       != pgd_page_vaddr(*pgd_ref));

			if (removed) {
				if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
					pgd_clear(pgd);
			} else {
				if (pgd_none(*pgd))
					set_pgd(pgd, *pgd_ref);
			}

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). It's safe to do so ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

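/*
 * fill_pud()/fill_pmd()/fill_pte() allocate a missing next-level table
 * via spp_getpage() and return the entry for @vaddr at that level.
 */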
static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		pud_t *pud = (pud_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, pud);
		if (pud != pud_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       pud, pud_offset(pgd, 0));
	}
	return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_page + pud_index(vaddr);
	pmd = fill_pmd(pud, vaddr);
	pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
		       "PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud_page = (pud_t*)pgd_page_vaddr(*pgd);
	set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

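/* Pre-populate the intermediate page-table levels for an early mapping at @vaddr. */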
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	pud = fill_pud(pgd, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			pud = (pud_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		pud = pud_offset(pgd, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end. _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has a valid max_pfn_mapped set in
	 * arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

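/*
 * The phys_{pte,pmd,pud}_init() helpers below populate one page-table
 * level of the direct mapping for a physical range, honouring
 * page_size_mask, and return the last physical address they mapped.
 */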
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
	      pgprot_t prot)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
	int i;

	pte_t *pte = pte_page + pte_index(addr);

	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
		next = (addr & PAGE_MASK) + PAGE_SIZE;
		if (addr >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
				set_pte(pte, __pte(0));
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume whoever pre-set up
		 * these mappings knew what they were doing.
		 */
		if (pte_val(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			printk("   pte=%p addr=%lx pte=%016lx\n",
			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask, pgprot_t prot)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;

	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address = next) {
		pmd_t *pmd = pmd_page + pmd_index(address);
		pte_t *pte;
		pgprot_t new_prot = prot;

		next = (address & PMD_MASK) + PMD_SIZE;
		if (address >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
			    !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
				set_pmd(pmd, __pmd(0));
			continue;
		}

		if (pmd_val(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				last_map_addr = phys_pte_init(pte, address,
							      end, prot);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				last_map_addr = next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pte = alloc_low_page();
		last_map_addr = phys_pte_init(pte, address, end, new_prot);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, pte);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return last_map_addr;
}

static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		pgprot_t prot = PAGE_KERNEL;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (addr >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
			    !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
				set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				last_map_addr = phys_pmd_init(pmd, addr, end,
							      page_size_mask, prot);
				__flush_tlb_all();
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				last_map_addr = next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
					PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pmd = alloc_low_page();
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
					      prot);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, pmd);
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	update_page_count(PG_LEVEL_1G, pages);

	return last_map_addr;
}

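/*
 * Create page-table mappings in the kernel direct mapping for the
 * physical range [start, end); returns the last physical address mapped.
 */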
unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	bool pgd_changed = false;
	unsigned long next, last_map_addr = end;
	unsigned long addr;

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);
	addr = start;

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		next = (start & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			pud = (pud_t *)pgd_page_vaddr(*pgd);
			last_map_addr = phys_pud_init(pud, __pa(start),
						      __pa(end), page_size_mask);
			continue;
		}

		pud = alloc_low_page();
		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
					      page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		pgd_populate(&init_mm, pgd, pud);
		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(addr, end - 1, 0);

	__flush_tlb_all();

	return last_map_addr;
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * Clear the default setting for node 0.
	 * Note: don't use nodes_clear here; that really clears the state only
	 * when NUMA support is not compiled in, and a later node_set_state()
	 * would not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	if (N_MEMORY != N_NORMAL_MEMORY)
		node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones +
		zone_for_memory(nid, start, size, ZONE_NORMAL);
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, start + size);

	ret = __add_pages(nid, zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start, size);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

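/*
 * Unused parts of a partially freed vmemmap page are filled with the
 * PAGE_INUSE pattern; the page itself is freed only once it consists
 * entirely of this pattern.
 */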
#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->lru.next;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (pte_val(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (pmd_val(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

/* Return true if pgd is changed, otherwise return false. */
static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (pud_val(*pud))
			return false;
	}

	/* free a pud table */
	free_pagetable(pgd_page(*pgd), 0);
	spin_lock(&init_mm.page_table_lock);
	pgd_clear(pgd);
	spin_unlock(&init_mm.page_table_lock);

	return true;
}

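/*
 * The remove_{pte,pmd,pud}_table() helpers below unmap a virtual range at
 * one level and, unless it belongs to the direct mapping (@direct), free
 * the backing pages; emptied lower-level tables are freed by the caller.
 */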
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (IS_ALIGNED(addr, PAGE_SIZE) &&
		    IS_ALIGNED(next, PAGE_SIZE)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or simply were not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mapping, pages means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused page_structs
			 * with 0xFD, and remove the page when it is wholly
			 * filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next, direct);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	pud_t *pud;
	bool pgd_changed = false;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		pud = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud, addr, next, direct);
		if (free_pud_table(pud, pgd))
			pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(start, end - 1, 1);

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end, false);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true);
}

int __ref arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	kernel_physical_mapping_remove(start, start + size);
	ret = __remove_pages(zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	register_page_bootmem_info();

	/* this will put all memory onto the freelists */
	free_all_bootmem();
	after_bootmem = 1;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
		   PAGE_SIZE, KCORE_OTHER);

	mem_init_print_info(NULL);
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	/*
	 * Make the kernel identity mapping for text RW. Kernel text
	 * mapping will always be RO. Refer to the comment in
	 * static_protections() in pageattr.c
	 */
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	/*
	 * Set the kernel identity mapping for text RO.
	 */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * would split the PMD and the remainder between _brk_end and
	 * the end of the PMD would remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
#endif

	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(text_end)),
			(unsigned long) __va(__pa_symbol(rodata_start)));
	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(rodata_end)),
			(unsigned long) __va(__pa_symbol(_sdata)));
}

#endif

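/*
 * kern_addr_valid() reports whether a kernel virtual address is backed by
 * a valid page at any level (1G/2M/4k); non-canonical addresses fail.
 */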
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

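/*
 * Pick the memory block size used by memory hotplug: 2GB on systems with
 * at least 64GB of RAM, otherwise the largest power of two (capped at 2GB)
 * that divides the end of memory, but no less than MIN_MEMORY_BLOCK_SIZE.
 */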
static unsigned long probe_memory_block_size(void)
{
	/* start from 2g */
	unsigned long bz = 1UL << 31;

	if (totalram_pages >= (64ULL << (30 - PAGE_SHIFT))) {
		pr_info("Using 2GB memory block size for large-memory system\n");
		return 2UL * 1024 * 1024 * 1024;
	}

	/* less than 64g installed */
	if ((max_pfn << PAGE_SHIFT) < (16UL << 32))
		return MIN_MEMORY_BLOCK_SIZE;

	/* get the tail size */
	while (bz > MIN_MEMORY_BLOCK_SIZE) {
		if (!((max_pfn << PAGE_SHIFT) & (bz - 1)))
			break;
		bz >>= 1;
	}

	printk(KERN_DEBUG "memory block size : %ldMB\n", bz >> 20);

	return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
						unsigned long end, int node)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
				continue;
			}
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		pr_warn_once("vmemmap: falling back to regular page backing\n");
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}
	return 0;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	int err;

	if (cpu_has_pse)
		err = vmemmap_populate_hugepages(start, end, node);
	else
		err = vmemmap_populate_basepages(start, end, node);
	if (!err)
		sync_global_pgds(start, end - 1, 0);
	return err;
}

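/*
 * Register bootmem info for every page-table page backing the vmemmap of
 * the given section, so that memory hot-remove can account for them.
 */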
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pages = 1 << (get_order(PMD_SIZE));
			page = pmd_page(*pmd);
			while (nr_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
		       addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif