x86, mm: Separate x86_64 vmalloc_sync_all() into separate functions
arch/x86/mm/init_64.c

/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>
#include <linux/gfp.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>

static unsigned long dma_reserve __initdata;

static int __init parse_direct_gbpages_off(char *arg)
{
        direct_gbpages = 0;
        return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
        direct_gbpages = 1;
        return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

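/*
 * Usage note (exposition, not in the original file): "gbpages" and
 * "nogbpages" are kernel command-line switches. direct_gbpages feeds into
 * the page_size_mask handed to kernel_physical_mapping_init() below (the
 * PG_LEVEL_1G bit), selecting whether the direct mapping may use 1GB pages
 * on CPUs that support them.
 */
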
/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
        if (!strcmp(str, "on"))
                force_personality32 &= ~READ_IMPLIES_EXEC;
        else if (!strcmp(str, "off"))
                force_personality32 |= READ_IMPLIES_EXEC;
        return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory is added or removed, make sure that every process's mm has
 * a suitable entry in its per-process PGD page for the affected kernel
 * PGD range.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
        unsigned long address;

        for (address = start; address <= end; address += PGDIR_SIZE) {
                const pgd_t *pgd_ref = pgd_offset_k(address);
                unsigned long flags;
                struct page *page;

                if (pgd_none(*pgd_ref))
                        continue;

                spin_lock_irqsave(&pgd_lock, flags);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        if (pgd_none(*pgd))
                                set_pgd(pgd, *pgd_ref);
                        else
                                BUG_ON(pgd_page_vaddr(*pgd)
                                       != pgd_page_vaddr(*pgd_ref));
                }
                spin_unlock_irqrestore(&pgd_lock, flags);
        }
}

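/*
 * Background (a sketch, not from the original file): each mm on x86_64
 * carries its own copy of the kernel PGD entries, so a new top-level
 * entry created in init_mm (by vmalloc or memory hotplug) must be copied
 * into every pgd on pgd_list. sync_global_pgds() factors that work out
 * of the old x86_64 vmalloc_sync_all(); a caller would use it roughly as
 *
 *	sync_global_pgds((unsigned long)start, (unsigned long)end);
 *
 * where [start, end] is the affected kernel virtual range.
 */
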
/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). That is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}

static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
        if (pgd_none(*pgd)) {
                pud_t *pud = (pud_t *)spp_getpage();
                pgd_populate(&init_mm, pgd, pud);
                if (pud != pud_offset(pgd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
                               pud, pud_offset(pgd, 0));
        }
        return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
        if (pud_none(*pud)) {
                pmd_t *pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
        }
        return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
                if (pte != pte_offset_kernel(pmd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
        }
        return pte_offset_kernel(pmd, vaddr);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pud = pud_page + pud_index(vaddr);
        pmd = fill_pmd(pud, vaddr);
        pte = fill_pte(pmd, vaddr);

        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        pud_t *pud_page;

        pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud_page = (pud_t*)pgd_page_vaddr(*pgd);
        set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        pgd_t *pgd;
        pud_t *pud;

        pgd = pgd_offset_k(vaddr);
        pud = fill_pud(pgd, vaddr);
        return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return fill_pte(pmd, vaddr);
}

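/*
 * Illustration (assumed typical use, not part of this file): installing a
 * single kernel mapping, e.g. for a fixmap slot, composes the helpers
 * above:
 *
 *	set_pte_vaddr(vaddr, pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL));
 *
 * which creates any missing pud/pmd/pte levels via the fill_*() helpers
 * and flushes just the one TLB entry.
 */
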
/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                                        pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                pgd = pgd_offset_k((unsigned long)__va(phys));
                if (pgd_none(*pgd)) {
                        pud = (pud_t *) spp_getpage();
                        set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pud = pud_offset(pgd, (unsigned long)__va(phys));
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
        }
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _end. _end is
 * rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;
        pmd_t *last_pmd = pmd + PTRS_PER_PMD;

        for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}

static __ref void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = e820_table_end++;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
                *phys = __pa(adr);

                return adr;
        }

        if (pfn >= e820_table_top)
                panic("alloc_low_page: ran out of memory");

        adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys = pfn * PAGE_SIZE;
        return adr;
}

static __ref void unmap_low_page(void *adr)
{
        if (after_bootmem)
                return;

        early_iounmap(adr, PAGE_SIZE);
}

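/*
 * Note (exposition): before bootmem is usable, page-table pages are carved
 * out of the early table range (e820_table_end grows toward e820_table_top)
 * and are only temporarily mapped through early_memremap(), which is why
 * every alloc_low_page() is paired with an unmap_low_page() once the new
 * table page has been wired into its parent.
 */
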
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
              pgprot_t prot)
{
        unsigned pages = 0;
        unsigned long last_map_addr = end;
        int i;

        pte_t *pte = pte_page + pte_index(addr);

        for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {

                if (addr >= end) {
                        if (!after_bootmem) {
                                for (; i < PTRS_PER_PTE; i++, pte++)
                                        set_pte(pte, __pte(0));
                        }
                        break;
                }

                /*
                 * We will re-use the existing mapping.
                 * Xen for example has some special requirements, like
                 * mapping pagetable pages as RO. So assume that whoever
                 * pre-set-up these mappings knew what they were doing.
                 */
                if (pte_val(*pte)) {
                        pages++;
                        continue;
                }

                if (0)
                        printk(" pte=%p addr=%lx pte=%016lx\n",
                               pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
                set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
                last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
        }

        update_page_count(PG_LEVEL_4K, pages);

        return last_map_addr;
}

static unsigned long __meminit
phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end,
                pgprot_t prot)
{
        pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

        return phys_pte_init(pte, address, end, prot);
}

static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
              unsigned long page_size_mask, pgprot_t prot)
{
        unsigned long pages = 0;
        unsigned long last_map_addr = end;

        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                unsigned long pte_phys;
                pmd_t *pmd = pmd_page + pmd_index(address);
                pte_t *pte;
                pgprot_t new_prot = prot;

                if (address >= end) {
                        if (!after_bootmem) {
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        }
                        break;
                }

                if (pmd_val(*pmd)) {
                        if (!pmd_large(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                last_map_addr = phys_pte_update(pmd, address,
                                                                end, prot);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
                        /*
                         * If we are ok with PG_LEVEL_2M mapping, then we will
                         * use the existing mapping.
                         *
                         * Otherwise, we will split the large page mapping but
                         * use the same existing protection bits except for
                         * large page, so that we don't violate Intel's TLB
                         * Application note (317080) which says, while changing
                         * the page sizes, new and old translations should
                         * not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
                                pages++;
                                continue;
                        }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }

                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
                                pfn_pte(address >> PAGE_SHIFT,
                                        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (address & PMD_MASK) + PMD_SIZE;
                        continue;
                }

                pte = alloc_low_page(&pte_phys);
                last_map_addr = phys_pte_init(pte, address, end, new_prot);
                unmap_low_page(pte);

                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return last_map_addr;
}

static unsigned long __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
                unsigned long page_size_mask, pgprot_t prot)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long last_map_addr;

        last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot);
        __flush_tlb_all();
        return last_map_addr;
}

static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
              unsigned long page_size_mask)
{
        unsigned long pages = 0;
        unsigned long last_map_addr = end;
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
                pgprot_t prot = PAGE_KERNEL;

                if (addr >= end)
                        break;

                if (!after_bootmem &&
                    !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        if (!pud_large(*pud)) {
                                last_map_addr = phys_pmd_update(pud, addr, end,
                                                        page_size_mask, prot);
                                continue;
                        }
                        /*
                         * If we are ok with PG_LEVEL_1G mapping, then we will
                         * use the existing mapping.
                         *
                         * Otherwise, we will split the gbpage mapping but use
                         * the same existing protection bits except for large
                         * page, so that we don't violate Intel's TLB
                         * Application note (317080) which says, while changing
                         * the page sizes, new and old translations should
                         * not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
                                pages++;
                                continue;
                        }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }

                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
                                pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                        continue;
                }

                pmd = alloc_low_page(&pmd_phys);
                last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
                                              prot);
                unmap_low_page(pmd);

                spin_lock(&init_mm.page_table_lock);
                pud_populate(&init_mm, pud, __va(pmd_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        update_page_count(PG_LEVEL_1G, pages);

        return last_map_addr;
}

static unsigned long __meminit
phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
                unsigned long page_size_mask)
{
        pud_t *pud;

        pud = (pud_t *)pgd_page_vaddr(*pgd);

        return phys_pud_init(pud, addr, end, page_size_mask);
}

unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        unsigned long next, last_map_addr = end;

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
                unsigned long pud_phys;
                pud_t *pud;

                next = (start + PGDIR_SIZE) & PGDIR_MASK;
                if (next > end)
                        next = end;

                if (pgd_val(*pgd)) {
                        last_map_addr = phys_pud_update(pgd, __pa(start),
                                                 __pa(end), page_size_mask);
                        continue;
                }

                pud = alloc_low_page(&pud_phys);
                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
                                                 page_size_mask);
                unmap_low_page(pud);

                spin_lock(&init_mm.page_table_lock);
                pgd_populate(&init_mm, pgd, __va(pud_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        return last_map_addr;
}

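/*
 * Overview (exposition, not from the original file): the direct mapping is
 * built top-down. kernel_physical_mapping_init() walks PGD slots,
 * phys_pud_init() chooses between a 1GB page (PG_LEVEL_1G in
 * page_size_mask) and a pmd table, phys_pmd_init() likewise between a 2MB
 * page and a pte table, and phys_pte_init() fills individual 4kB ptes.
 * Every level returns last_map_addr, the end of the range it actually
 * mapped.
 */
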
#ifndef CONFIG_NUMA
void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
                                int acpi, int k8)
{
#ifndef CONFIG_NO_BOOTMEM
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         0, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
#else
        e820_register_active_regions(0, start_pfn, end_pfn);
#endif
}
#endif

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = max_pfn;

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        /*
         * Clear the default state for node 0.
         * Note: don't use nodes_clear here; that really does clear when
         * NUMA support is not compiled in, and a later node_set_state()
         * would not set it back.
         */
        node_clear_state(0, N_NORMAL_MEMORY);

        free_area_init_nodes(max_zone_pfns);
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory
 * need updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
        unsigned long end_pfn = PFN_UP(start + size);

        if (end_pfn > max_pfn) {
                max_pfn = end_pfn;
                max_low_pfn = end_pfn;
                high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
        }
}

/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        last_mapped_pfn = init_memory_mapping(start, start + size);
        if (last_mapped_pfn > max_pfn_mapped)
                max_pfn_mapped = last_mapped_pfn;

        ret = __add_pages(nid, zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);

        /* update max_pfn, max_low_pfn and high_memory */
        update_end_of_memory_vars(start, size);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

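/*
 * Flow note (exposition): for hot-added memory, arch_add_memory() above
 * first extends the kernel direct mapping via init_memory_mapping() and
 * only then hands the new page frames to the core VM with __add_pages(),
 * so the new range is already addressable when its struct pages are set up.
 */
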
static struct kcore_list kcore_vsyscall;

void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;
        unsigned long absent_pages;

        pci_iommu_alloc();

        /* clear_bss() already cleared the empty_zero_page */

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif

        absent_pages = absent_pages_in_range(0, max_pfn);
        reservedpages = max_pfn - totalram_pages - absent_pages;
        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
                "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                max_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                absent_pages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = PFN_ALIGN(__stop___ex_table);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, end);

        /*
         * Make the kernel identity mapping for text RW. Kernel text
         * mapping will always be RO. Refer to the comment in
         * static_protections() in pageattr.c
         */
        set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = PFN_ALIGN(__stop___ex_table);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, end);

        /*
         * Set the kernel identity mapping for text RO.
         */
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long rodata_start =
                ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
        unsigned long end = (unsigned long) &__end_rodata_hpage_align;
        unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
        unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
        unsigned long data_start = (unsigned long) &_sdata;

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        kernel_set_to_readonly = 1;

        /*
         * The rodata section (but not the kernel text!) should also be
         * not-executable.
         */
        set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);

        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

        free_init_pages("unused kernel memory",
                (unsigned long) page_address(virt_to_page(text_end)),
                (unsigned long) page_address(virt_to_page(rodata_start)));
        free_init_pages("unused kernel memory",
                (unsigned long) page_address(virt_to_page(rodata_end)),
                (unsigned long) page_address(virt_to_page(data_start)));
}

#endif

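/*
 * Layout sketch (exposition; symbols come from the linker script):
 *
 *	_text .. __stop___ex_table	text + exception table, kept RO+X
 *	__start_rodata .. __end_rodata	read-only data, made RO and NX
 *	_sdata ..			writable data
 *
 * Because the rodata region is aligned up to a 2MB boundary
 * (__end_rodata_hpage_align), mark_rodata_ro() returns the padding pages
 * between text_end and rodata_start, and between rodata_end and
 * data_start, to the page allocator via free_init_pages().
 */
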
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
#ifdef CONFIG_NUMA
        int nid, next_nid;
        int ret;
#endif
        unsigned long pfn = phys >> PAGE_SHIFT;

        if (pfn >= max_pfn) {
                /*
                 * This can happen with kdump kernels when accessing
                 * firmware tables:
                 */
                if (pfn < max_pfn_mapped)
                        return -EFAULT;

                printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n",
                                phys, len);
                return -EFAULT;
        }

        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        nid = phys_to_nid(phys);
        next_nid = phys_to_nid(phys + len - 1);
        if (nid == next_nid)
                ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags);
        else
                ret = reserve_bootmem(phys, len, flags);

        if (ret != 0)
                return ret;

#else
        reserve_bootmem(phys, len, flags);
#endif

        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
                set_dma_reserve(dma_reserve);
        }

        return 0;
}

int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

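/*
 * Note (exposition): the "above" test in kern_addr_valid() rejects
 * non-canonical addresses. On x86_64 the bits above __VIRTUAL_MASK_SHIFT
 * must be a sign extension (all zeroes or all ones), so any other pattern
 * cannot be a valid address. What follows is an ordinary four-level table
 * walk that can terminate early at a 2MB pmd mapping.
 */
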
/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64-bit vsyscall page now. 32-bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_START,
        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        if (vma == &gate_vma)
                return "[vsyscall]";
        return NULL;
}

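/*
 * Background (exposition): with CONFIG_SPARSEMEM_VMEMMAP the struct page
 * array lives in a virtually contiguous region. vmemmap_populate() below
 * backs that region with 2MB PMD mappings when the CPU has PSE, falling
 * back to 4kB ptes otherwise; the p_start/p_end bookkeeping merely batches
 * the "[start-end] PMD -> [p-p] on node N" debug output.
 */
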
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        for (; addr < end; addr = next) {
                void *p = NULL;

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                if (!cpu_has_pse) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        pmd = vmemmap_pmd_populate(pud, addr, node);

                        if (!pmd)
                                return -ENOMEM;

                        p = vmemmap_pte_populate(pmd, addr, node);

                        if (!p)
                                return -ENOMEM;

                        addr_end = addr + PAGE_SIZE;
                        p_end = p + PAGE_SIZE;
                } else {
                        next = pmd_addr_end(addr, end);

                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd)) {
                                pte_t entry;

                                p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                                if (!p)
                                        return -ENOMEM;

                                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                PAGE_KERNEL_LARGE);
                                set_pmd(pmd, __pmd(pte_val(entry)));

                                /* check to see if we have contiguous blocks */
                                if (p_end != p || node_start != node) {
                                        if (p_start)
                                                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                                       addr_start, addr_end-1, p_start, p_end-1, node_start);
                                        addr_start = addr;
                                        node_start = node;
                                        p_start = p;
                                }

                                addr_end = addr + PMD_SIZE;
                                p_end = p + PMD_SIZE;
                        } else
                                vmemmap_verify((pte_t *)pmd, node, addr, next);
                }

        }
        return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
        if (p_start) {
                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                        addr_start, addr_end-1, p_start, p_end-1, node_start);
                p_start = NULL;
                p_end = NULL;
                node_start = 0;
        }
}
#endif