[PATCH] x86_64: Implement early DMI scanning
[deliverable/linux.git] arch/x86_64/mm/init.c
/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/dma-mapping.h>
#include <asm/swiotlb.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}

/* References to section boundaries */

int after_bootmem;

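/*
 * after_bootmem is set in mem_init() once the bootmem allocator has been
 * retired.  The early page table helpers (spp_getpage, alloc_low_page)
 * check it to decide whether a fresh page table page comes from the page
 * allocator (get_zeroed_page) or from bootmem / raw e820 memory during
 * early boot.
 */
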
static void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

static void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

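/*
 * set_pte_phys() above is the worker behind __set_fixmap(); fixmap users
 * normally go through the set_fixmap()/set_fixmap_nocache() wrappers rather
 * than calling it directly.  An illustrative use (the names are taken from
 * the APIC setup code, not from this file) is mapping a device register
 * page at a fixed virtual slot, roughly:
 *
 *	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
 *	va = fix_to_virt(FIX_APIC_BASE);
 */
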
/* NOTE: this is meant to be run only at boot */
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void *address;
	int allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

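/*
 * temp_boot_pmds[] are two pmd slots set aside by the early boot code
 * (head.S).  Together with the 40MB/42MB virtual windows above they let
 * early boot map an arbitrary 2MB-aligned physical area while the direct
 * mapping is still being built: alloc_low_page() points one slot at a new
 * page table page so it can be zeroed and filled, and early_ioremap()
 * below borrows both slots to cover a region that may cross a 2MB boundary.
 */
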
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

	/* The temporary window spans two large pages, so somewhat more than
	   LARGE_PAGE_SIZE usually fits; keep the check simple anyway. */
	if (size >= LARGE_PAGE_SIZE) {
		printk("SMBIOS area too long %lu\n", size);
		return NULL;
	}
	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	map += LARGE_PAGE_SIZE;
	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	__flush_tlb();
	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
		printk("early_iounmap: bad address %p\n", addr);
	set_pmd(temp_mappings[0].pmd, __pmd(0));
	set_pmd(temp_mappings[1].pmd, __pmd(0));
	__flush_tlb();
}

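/*
 * The early_ioremap()/early_iounmap() pair above is what enables early
 * DMI/SMBIOS scanning before the low identity mappings are zapped.  The
 * consumer is the generic DMI code (not shown here); a sketch of the
 * expected usage, with the legacy 0xF0000 BIOS range as an illustrative
 * address, is roughly:
 *
 *	char *p = early_ioremap(0xF0000, 0x10000);
 *	if (p) {
 *		... scan p[0..0xffff] for the "_SM_"/"_DMI_" anchor ...
 *		early_iounmap(p, 0x10000);
 *	}
 */
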
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
		unsigned long entry;

		if (address > end) {
			for (; i < PTRS_PER_PMD; i++, pmd++)
				set_pmd(pmd, __pmd(0));
			break;
		}
		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));

	if (pmd_none(*pmd)) {
		spin_lock(&init_mm.page_table_lock);
		phys_pmd_init(pmd, address, end);
		spin_unlock(&init_mm.page_table_lock);
		__flush_tlb_all();
	}
}

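/*
 * phys_pmd_init() above fills a pmd page with 2MB (PSE) mappings of the
 * physical range: each entry carries _PAGE_PSE and _PAGE_GLOBAL, and the
 * _PAGE_NX bit is masked off by __supported_pte_mask on CPUs without NX
 * support.  Entries past 'end' are cleared so the tail of the pmd page is
 * not left stale.
 */
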
static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i = pud_index(address);

	pud = pud + i;

	if (after_bootmem && pud_val(*pud)) {
		phys_pmd_update(pud, address, end);
		return;
	}

	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
		if (paddr >= end)
			break;

		if (!after_bootmem && !e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, paddr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}

static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
}

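/*
 * Rough sizing example for find_early_table_space() above (assuming 4KB
 * pages, 8-byte entries, 2MB pmd and 1GB pud coverage): for end = 4GB it
 * computes puds = 4 and pmds = 2048, so tables = round_up(4 * 8, 4K) +
 * round_up(2048 * 8, 4K) = 4KB + 16KB = 20KB of early page table space,
 * carved out of e820 RAM starting at 0x8000.
 */
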
/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset_k(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}

void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For APs, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = start_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}

	/* Add the space needed for mem_map to the holes too. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

	/* The 16MB DMA zone has the kernel and other misc mappings.
	   Account them too */
	if (h[ZONE_DMA]) {
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printk(KERN_WARNING
				"Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA];
		}
	}
}

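/*
 * Worked example for size_zones() above (illustrative numbers): on a 2GB
 * machine with start_pfn = 0 and end_pfn = 524288, the first pass gives
 * z[ZONE_DMA] = 4096 (16MB), z[ZONE_DMA32] = 524288 and z[ZONE_NORMAL] =
 * 524288; after removing lower zones from higher ones the result is
 * DMA = 4096 pages, DMA32 = 520192 pages and NORMAL = 0, since nothing
 * lives above 4GB.  The hole counts then add e820 gaps, the mem_map
 * estimate and dma_reserve.
 */
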
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

	memory_present(0, 0, end_pfn);
	sparse_init();
	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
			       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 * These are only for non-NUMA machines right now.
 */
#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

int add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(0);
	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	init_memory_mapping(start, (start + size -1));

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

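/*
 * add_memory() above always hot-adds into pgdat->node_zones + MAX_NR_ZONES-2,
 * which with this kernel's zone layout (DMA, DMA32, NORMAL, HIGHMEM) is
 * ZONE_NORMAL, presumably because hotplugged ranges are expected above 4GB.
 */
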
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#endif

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
	no_iommu_init();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for the APs' bringup.
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

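/*
 * free_initmem() below poisons the freed init text with 0xcc (the x86 int3
 * breakpoint opcode) so that any stale jump into discarded __init code traps
 * immediately, and fills __initdata with the 0xba pattern, presumably so
 * that late references to freed init data stand out.
 */
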
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
	printk("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}

#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)&__start_rodata;

	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
	       (&__end_rodata - &__start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
		dma_reserve += len / PAGE_SIZE;
}

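/*
 * kern_addr_valid() below first rejects non-canonical addresses: with a
 * 48-bit virtual address space, ((long)addr) >> __VIRTUAL_MASK_SHIFT is 0
 * for lower-half addresses and -1 for sign-extended upper-half addresses,
 * so any other value cannot be mapped at all.  Candidate addresses are then
 * checked with a plain page table walk, stopping early at 2MB mappings.
 */
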
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}