#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* for max_low_pfn */

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */

unsigned long __initdata pgt_buf_start;
unsigned long __meminitdata pgt_buf_end;
unsigned long __meminitdata pgt_buf_top;

int after_bootmem;

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
			= 1
#endif
;

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

static int page_size_mask;

static void __init probe_page_size_mask(void)
{
#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	if (direct_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (cpu_has_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}
}
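
/*
 * Note: the PG_LEVEL_2M/PG_LEVEL_1G bits set above are the same values that
 * split_mem_range() later copies into each map_range's page_size_mask, so
 * they end up deciding which page sizes kernel_physical_mapping_init() may
 * use for a given range.
 */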

void __init native_pagetable_reserve(u64 start, u64 end)
{
	memblock_reserve(start, end - start);
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long pos;
	int i;

	/* head if not big page aligned */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pos == 0)
		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
	else
		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
#endif
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}

	/* big page (2M) range */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pos = end_pfn << PAGE_SHIFT;
	}

	/* tail if not big page (1G) aligned */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#endif

	/* tail if not big page (2M) aligned */
	start_pfn = pos>>PAGE_SHIFT;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* try to merge contiguous ranges with the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
				mr[i].start, mr[i].end - 1,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	return nr_range;
}

/*
 * First calculate space needed for kernel direct mapping page tables to cover
 * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
 * pages.  Then find enough contiguous space for those page tables.
 */
static void __init find_early_table_space(struct map_range *mr, int nr_range)
{
	int i;
	unsigned long puds = 0, pmds = 0, ptes = 0, tables;
	unsigned long start = 0, good_end;
	phys_addr_t base;

	for (i = 0; i < nr_range; i++) {
		unsigned long range, extra;

		range = mr[i].end - mr[i].start;
		puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;

		if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
			extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
			pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
		} else {
			pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
		}

		if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
			extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
#ifdef CONFIG_X86_32
			extra += PMD_SIZE;
#endif
			ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
		} else {
			ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
		}
	}

	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

#ifdef CONFIG_X86_32
	/* for fixmap */
	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
#endif
	good_end = max_pfn_mapped << PAGE_SHIFT;

	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
	if (!base)
		panic("Cannot find space for the kernel page tables");

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
		mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
		(pgt_buf_top << PAGE_SHIFT) - 1);
}
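
/*
 * Worked example (illustrative, x86_64 with 8-byte table entries): a single
 * 4GB range mapped with 2M pages needs 4 PUD entries and 2048 PMD entries,
 * so tables = roundup(4 * 8, 4K) + roundup(2048 * 8, 4K) = 4K + 16K = 20K,
 * i.e. five pages reserved between pgt_buf_start and pgt_buf_top.  If 1G
 * pages are also allowed, the PMDs only cover the non-1G-aligned remainder
 * (none in this case), and a single page of PUD entries is enough.
 */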

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_info("init_memory_mapping: [mem %#010lx-%#010lx]\n",
		start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(mr, nr_range);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);

#ifdef CONFIG_X86_32
	early_ioremap_page_table_range_init();

	load_cr3(swapper_pg_dir);
#endif

	__flush_tlb_all();

	/*
	 * Reserve the kernel pagetable pages we used (pgt_buf_start -
	 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
	 * so that they can be reused for other purposes.
	 *
	 * On native it just means calling memblock_reserve; on Xen it also
	 * means marking RW the pagetable pages that we allocated before
	 * but that haven't been used.
	 *
	 * In fact, on Xen we mark RO the whole range pgt_buf_start -
	 * pgt_buf_top, because we have to make sure that when
	 * init_memory_mapping reaches the pagetable pages area, it maps
	 * RO all the pagetable pages, including the ones that are beyond
	 * pgt_buf_end at that time.
	 */
	if (!after_bootmem && pgt_buf_end > pgt_buf_start)
		x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
				PFN_PHYS(pgt_buf_end));

	if (!after_bootmem)
		early_memtest(start, end);

	return ret >> PAGE_SHIFT;
}

void __init init_mem_mapping(void)
{
	probe_page_size_mask();

	/* max_pfn_mapped is updated here */
	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
	max_pfn_mapped = max_low_pfn_mapped;

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		max_pfn_mapped = init_memory_mapping(1UL<<32,
						     max_pfn<<PAGE_SHIFT);
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;
	}
#endif
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these contain
 * the PCI mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr < 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}
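
/*
 * Note on the "pagenr < 256" check above: with 4k pages, page numbers 0-255
 * cover exactly the first megabyte (256 * 4096 bytes), i.e. the legacy
 * BIOS/VGA area described in the comment before the function.
 */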

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	addr = begin;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
		begin, end - 1);
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read only above; now that we are
	 * going to free part of that, we need to make it writable and
	 * non-executable first.
	 */
	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end could be unaligned, and we cannot align it: an aligned
	 * initrd_end could confuse the decompressor.  The trailing partial
	 * page has already been reserved in
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * so here we can safely PAGE_ALIGN(end) to free that partial page.
	 */
	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
}
#endif

void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}