/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c":
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/lmb.h>
#include <asm/sections.h>

#include "mmu_decl.h"
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif
int init_bootmem_done;
unsigned long memory_limit;
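
/*
 * Tell whether a page frame number falls within real memory: on 32-bit
 * we compare the physical address against __pa(high_memory), on 64-bit
 * we search the LMB memory regions.
 */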
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}
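
/*
 * Choose the page protection for a physical memory mapping (e.g. for
 * /dev/mem): defer to the platform hook if one exists, otherwise map
 * anything that is not RAM as guarded and non-cacheable.
 */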
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG
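
/*
 * Make a newly hot-added page available to the allocator and account
 * for it in the global page counts.
 */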
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif
int __devinit arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}
/*
 * First pass at this code will check to determine if the remove
 * request is within the RMO.  Do not allow removal within the RMO.
 */
int __devinit remove_memory(u64 start, u64 size)
{
	struct zone *zone;
	unsigned long start_pfn, end_pfn, nr_pages;

	start_pfn = start >> PAGE_SHIFT;
	nr_pages = size >> PAGE_SHIFT;
	end_pfn = start_pfn + nr_pages;

	printk("%s(): Attempting to remove memory in range "
		"%lx to %lx\n", __func__, start, start+size);
	/*
	 * check for range within RMO
	 */
	zone = page_zone(pfn_to_page(start_pfn));

	printk("%s(): memory will be removed from "
		"the %s zone\n", __func__, zone->name);

	/*
	 * not handling removing memory ranges that
	 * overlap multiple zones yet
	 */
	if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
		goto overlap;

	/* make sure it is NOT in RMO */
	if ((start < lmb.rmo_size) || ((start+size) < lmb.rmo_size)) {
		printk("%s(): range to be removed must NOT be in RMO!\n",
			__func__);
		goto in_rmo;
	}

	return __remove_pages(zone, start_pfn, nr_pages);

overlap:
	printk("%s(): memory range to be removed overlaps "
		"multiple zones!!!\n", __func__);
in_rmo:
	return -1;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
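
/*
 * Walk every online node and print a summary of memory usage: total,
 * reserved, shared, swap-cached and highmem pages.
 */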
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
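	/*
	 * For example (illustrative numbers only): with 512MB of lowmem
	 * and 4KB pages, total_pages is 131072, so the bitmap needs
	 * 131072 / 8 = 16KB, i.e. 4 pages, plus the 1 alignment page.
	 */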
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
#else
	free_bootmem_with_active_regions(0, max_pfn);
#endif

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long lmb_next_region_start_pfn,
		      lmb_region_max_pfn;
	int i;

	for (i = 0; i < lmb.memory.cnt - 1; i++) {
		lmb_region_max_pfn =
			(lmb.memory.region[i].base >> PAGE_SHIFT) +
			(lmb.memory.region[i].size >> PAGE_SHIFT);
		lmb_next_region_start_pfn =
			lmb.memory.region[i+1].base >> PAGE_SHIFT;

		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
			register_nosave_region(lmb_region_max_pfn,
					       lmb_next_region_start_pfn);
	}

	return 0;
}
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
			KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
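
/*
 * Release the bootmem allocator's pages into the buddy allocator, count
 * reserved pages, free highmem, and print the final memory summary.
 */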
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       bsssize >> 10,
	       initsize >> 10);
}
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
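
/*
 * Zero a page destined for user space; flush_dcache_page() marks it as
 * not i-cache clean so it gets flushed before being mapped executable.
 */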
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
485 * This is called at the end of handling a user page fault, when the
486 * fault has been handled by updating a PTE in the linux page tables.
487 * We use it to preload an HPTE into the hash table corresponding to
488 * the updated linux PTE.
490 * This must always be called with the pte lock held.
492 void update_mmu_cache(struct vm_area_struct
*vma
, unsigned long address
,
495 #ifdef CONFIG_PPC_STD_MMU
496 unsigned long access
= 0, trap
;
498 unsigned long pfn
= pte_pfn(pte
);
500 /* handle i-cache coherency */
501 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE
) &&
502 !cpu_has_feature(CPU_FTR_NOEXECUTE
) &&
504 struct page
*page
= pfn_to_page(pfn
);
506 /* On 8xx, cache control instructions (particularly
507 * "dcbst" from flush_dcache_icache) fault as write
508 * operation if there is an unpopulated TLB entry
509 * for the address in question. To workaround that,
510 * we invalidate the TLB here, thus avoiding dcbst
515 if (!PageReserved(page
)
516 && !test_bit(PG_arch_1
, &page
->flags
)) {
517 if (vma
->vm_mm
== current
->active_mm
) {
518 __flush_dcache_icache((void *) address
);
520 flush_dcache_icache_page(page
);
521 set_bit(PG_arch_1
, &page
->flags
);
525 #ifdef CONFIG_PPC_STD_MMU
526 /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
527 if (!pte_young(pte
) || address
>= TASK_SIZE
)
530 /* We try to figure out if we are coming from an instruction
531 * access fault and pass that down to __hash_page so we avoid
532 * double-faulting on execution of fresh text. We have to test
533 * for regs NULL since init will get here first thing at boot
535 * We also avoid filling the hash if not coming from a fault
537 if (current
->thread
.regs
== NULL
)
539 trap
= TRAP(current
->thread
.regs
);
541 access
|= _PAGE_EXEC
;
542 else if (trap
!= 0x300)
544 hash_preload(vma
->vm_mm
, address
, access
, trap
);
545 #endif /* CONFIG_PPC_STD_MMU */