/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/init.h>

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

bool __read_mostly __vmalloc_start_set = false;

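/*
 * Allocate one zeroed page for early page tables. Pages come from the
 * physically contiguous window [e820_table_start, e820_table_top) that
 * was reserved for page tables before the bootmem allocator is up.
 */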
static __init void *alloc_low_page(void)
{
        unsigned long pfn = e820_table_end++;
        void *adr;

        if (pfn >= e820_table_top)
                panic("alloc_low_page: ran out of memory");

        adr = __va(pfn * PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

                if (after_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                        if (!page_table)
                                page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                } else
                        page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

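/*
 * Make sure the pmd (and pte) page covering @vaddr exists in the kernel
 * page tables (swapper_pg_dir) and return a pointer to the entry that
 * maps that address.
 */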
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        int pgd_idx = pgd_index(vaddr);
        int pmd_idx = pmd_index(vaddr);

        return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        int pte_idx = pte_index(vaddr);
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return one_page_table_init(pmd) + pte_idx;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Something (early fixmap) may already have put a pte
         * page here, which causes the page table allocation
         * to become nonlinear. Attempt to fix it, and if it
         * is still nonlinear then we have to bug.
         */
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
            && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start
                || (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) {
                pte_t *newpte;
                int i;

                BUG_ON(after_bootmem);
                newpte = alloc_low_page();
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);

                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
                __flush_tlb_all();

                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
                pte = newpte;
        }
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
        return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
                                                    pmd, vaddr, pte);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

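/*
 * Everything from PAGE_OFFSET up to __init_end is treated as kernel text
 * here, so those ranges keep executable (and large-page executable)
 * protections in kernel_physical_mapping_init() below.
 */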
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
        unsigned long start_pfn, end_pfn;
        pgd_t *pgd_base = swapper_pg_dir;
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        /*
         * The first iteration will set up the identity mapping using
         * large/small pages based on use_pse, with the other attributes
         * the same as set by the early code in head_32.S.
         *
         * The second iteration will set up the appropriate attributes
         * (NX, GLOBAL, ...) as desired for the kernel identity mapping.
         *
         * This two-pass mechanism conforms to the TLB app note which says:
         *
         *     "Software should not write to a paging-structure entry in a way
         *      that would change, for any linear address, both the page size
         *      and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!cpu_has_pse)
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1)
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * Update the direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * Local global TLB flush, which will flush the previous
                 * mappings present in both the small and large page TLBs.
                 */
                __flush_tlb_all();

                /*
                 * The second iteration will set the actual desired PTE
                 * attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
        return 0;
}

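/*
 * kmap_pte caches the pte for the first fixmap-based kmap slot and
 * kmap_prot holds the protection used for those mappings; both are set
 * up once in kmap_init() below for use by the highmem kmap code.
 */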
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

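/*
 * Highmem pages are not managed by the bootmem allocator, so each usable
 * highmem page is handed to the buddy allocator individually here.
 */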
static void __init add_one_highpage_init(struct page *page, int pfn)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
                                        unsigned long end_pfn, void *datax)
{
        int node_pfn;
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;

        data = (struct add_highpages_data *)datax;

        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);
        if (final_start_pfn >= final_end_pfn)
                return 0;

        for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
             node_pfn++) {
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
                add_one_highpage_init(page, node_pfn);
        }

        return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        struct add_highpages_data data;

        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;

        work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

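/*
 * Remove the low identity mappings (virtual address == physical address)
 * that head_32.S set up for early boot, so that stray accesses through
 * NULL-ish pointers fault instead of silently hitting low memory.
 */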
void zap_low_mappings(void)
{
        int i;

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);

void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
        "highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
        "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if the user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;

        if (highmem_pages == -1)
                highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
        if (highmem_pages >= max_pfn) {
                printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
                        pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
                highmem_pages = 0;
        }
        if (highmem_pages) {
                if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
                        printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn -= highmem_pages;
        }
#else
        if (highmem_pages)
                printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
        "only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
        "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
        max_low_pfn = MAXMEM_PFN;

        if (highmem_pages == -1)
                highmem_pages = max_pfn - MAXMEM_PFN;

        if (highmem_pages + MAXMEM_PFN < max_pfn)
                max_pfn = MAXMEM_PFN + highmem_pages;

        if (highmem_pages + MAXMEM_PFN > max_pfn) {
                printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
                        pages_to_mb(max_pfn - MAXMEM_PFN),
                        pages_to_mb(highmem_pages));
                highmem_pages = 0;
        }
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
                printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
        else
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
        max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
        if (max_pfn > MAX_NONPAE_PFN) {
                max_pfn = MAX_NONPAE_PFN;
                printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
        }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */

        if (max_pfn <= MAXMEM_PFN)
                lowmem_pfn_init();
        else
                highmem_pfn_init();
}

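/*
 * Flat (non-NUMA) memory setup: register the single node's memory with
 * the active-region/sparsemem code, fill in the global pfn bookkeeping
 * and then bring up the bootmem allocator.
 */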
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
                         unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memory_present(0, 0, highend_pfn);
        e820_register_active_regions(0, 0, highend_pfn);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        memory_present(0, 0, max_low_pfn);
        e820_register_active_regions(0, 0, max_low_pfn);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        __vmalloc_start_set = true;

        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

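/*
 * Zone layout on 32-bit: ZONE_DMA covers the ISA DMA window, ZONE_NORMAL
 * the rest of lowmem, and (with CONFIG_HIGHMEM) ZONE_HIGHMEM everything
 * above max_low_pfn.
 */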
static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);
}

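/*
 * Set up the bootmem bitmap for one node's low memory and release its
 * free ranges to the bootmem allocator. Returns the address just past
 * this node's bitmap so the next node's bitmap can be placed behind it.
 */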
static unsigned long __init setup_node_bootmem(int nodeid,
                                               unsigned long start_pfn,
                                               unsigned long end_pfn,
                                               unsigned long bootmap)
{
        unsigned long bootmap_size;

        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
                                         bootmap >> PAGE_SHIFT,
                                         start_pfn, end_pfn);
        printk(KERN_INFO "  node %d low ram: %08lx - %08lx\n",
                nodeid, start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
        printk(KERN_INFO "  node %d bootmap %08lx - %08lx\n",
                nodeid, bootmap, bootmap + bootmap_size);
        free_bootmem_with_active_regions(nodeid, end_pfn);
        early_res_to_bootmem(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

        return bootmap + bootmap_size;
}

void __init setup_bootmem_allocator(void)
{
        int nodeid;
        unsigned long bootmap_size, bootmap;

        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);

        for_each_online_node(nodeid) {
                unsigned long start_pfn, end_pfn;

#ifdef CONFIG_NEED_MULTIPLE_NODES
                start_pfn = node_start_pfn[nodeid];
                end_pfn = node_end_pfn[nodeid];
                if (start_pfn > max_low_pfn)
                        continue;
                if (end_pfn > max_low_pfn)
                        end_pfn = max_low_pfn;
#else
                start_pfn = 0;
                end_pfn = max_low_pfn;
#endif
                bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn,
                                             bootmap);
        }

        after_bootmem = 1;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

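/*
 * mem_init() below releases all bootmem pages to the buddy allocator,
 * accounts reserved pages, prints the virtual memory layout, sanity-checks
 * the layout constants, and finally runs the WP-bit test and zaps the low
 * mappings.
 */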
static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        save_pg_dir();
        zap_low_mappings();
}

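/*
 * Memory hotplug: on 32-bit, hot-added memory is placed in ZONE_HIGHMEM.
 */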
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

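/*
 * Write-protect the kernel text and read-only data in the direct mapping.
 * With CONFIG_DYNAMIC_FTRACE the text section is left writable because the
 * tracer patches it at runtime; CONFIG_CPA_DEBUG additionally exercises the
 * change_page_attr paths by toggling the protections back and forth.
 */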
void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
        /* Dynamic tracing modifies the kernel text section */
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

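/*
 * Thin wrapper around reserve_bootmem(), kept so callers shared with the
 * 64-bit code have a common entry point.
 */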
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}