arch/powerpc/mm/mem.c
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

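/*
 * Walk the kernel page tables (pgd -> pud -> pmd) and return a pointer
 * to the PTE that maps the given kernel virtual address.
 */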
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
#endif

int page_is_ram(unsigned long pfn)
{
        unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64    /* XXX for now */
        return paddr < __pa(high_memory);
#else
        int i;
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base;

                base = lmb.memory.region[i].base;

                if ((paddr >= base) &&
                    (paddr < (base + lmb.memory.region[i].size))) {
                        return 1;
                }
        }

        return 0;
#endif
}

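/*
 * Choose the page protection for a user space mapping of physical memory
 * (e.g. an mmap of /dev/mem): give the platform a chance to override via
 * ppc_md, and force guarded, non-cacheable access for anything that is
 * not RAM.
 */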
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = __pgprot(pgprot_val(vma_prot)
                                    | _PAGE_GUARDED | _PAGE_NO_CACHE);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

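/*
 * Memory hotplug entry point: create the linear mapping for the newly
 * added range, then hand its pages to the zone allocator via __add_pages().
 */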
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        pgdata = NODE_DATA(nid);

        start = (unsigned long)__va(start);
        create_section_mapping(start, start + size);

        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;

        return __add_pages(zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn, end_pfn;
        int ret;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = start_pfn + (size >> PAGE_SHIFT);
        ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
        if (ret)
                goto out;
        /* Arch-specific calls go here - next patch */
out:
        return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in lmb.memory structures.  Walk through the
 * memory regions, find holes and invoke the callback for each contiguous
 * region.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
                        int (*func)(unsigned long, unsigned long, void *))
{
        struct lmb_property res;
        unsigned long pfn, len;
        u64 end;
        int ret = -1;

        res.base = (u64) start_pfn << PAGE_SHIFT;
        res.size = (u64) nr_pages << PAGE_SHIFT;

        end = res.base + res.size - 1;
        while ((res.base < end) && (lmb_find(&res) >= 0)) {
                pfn = (unsigned long)(res.base >> PAGE_SHIFT);
                len = (unsigned long)(res.size >> PAGE_SHIFT);
                ret = (*func)(pfn, len, arg);
                if (ret)
                        break;
                res.base += (res.size + 1);
                res.size = (end - res.base + 1);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(walk_memory_resource);
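
/*
 * Illustrative (hypothetical) usage sketch, not part of this file: a
 * caller could count the valid pages in a range with a callback such as
 *
 *	static int count_pages(unsigned long pfn, unsigned long len,
 *			       void *arg)
 *	{
 *		*(unsigned long *)arg += len;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_memory_resource(start_pfn, nr_pages, &total, count_pages);
 *
 * Any nonzero return from the callback stops the walk and is passed back.
 */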

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
        unsigned long i;
        unsigned long start, bootmap_pages;
        unsigned long total_pages;
        int boot_mapsize;

        max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /*
         * Find an area to use for the bootmem bitmap.  Calculate the size of
         * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
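        /*
         * Worked example (illustrative numbers only): with 512MB of RAM and
         * 4KB pages there are 131072 pages, so the bitmap takes
         * 131072 / 8 = 16KB, i.e. four pages, plus the one extra page
         * allowed for misalignment.
         */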
        bootmap_pages = bootmem_bootmap_pages(total_pages);

        start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
        boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

        /* Add active regions with valid PFNs */
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }

        /* Add all physical memory to the bootmem map, mark each area
         * present.
         */
#ifdef CONFIG_HIGHMEM
        free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long addr = lmb.reserved.region[i].base +
                                     lmb_size_bytes(&lmb.reserved, i) - 1;
                if (addr < lowmem_end_addr)
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        lmb_size_bytes(&lmb.reserved, i),
                                        BOOTMEM_DEFAULT);
                else if (lmb.reserved.region[i].base < lowmem_end_addr) {
                        unsigned long adjusted_size = lowmem_end_addr -
                                      lmb.reserved.region[i].base;
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        adjusted_size, BOOTMEM_DEFAULT);
                }
        }
#else
        free_bootmem_with_active_regions(0, max_pfn);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++)
                reserve_bootmem(lmb.reserved.region[i].base,
                                lmb_size_bytes(&lmb.reserved, i),
                                BOOTMEM_DEFAULT);

#endif
        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);

        init_bootmem_done = 1;
}

/* Mark the holes between memory regions (pages that don't exist) as nosave */
static int __init mark_nonram_nosave(void)
{
        unsigned long lmb_next_region_start_pfn,
                      lmb_region_max_pfn;
        int i;

        for (i = 0; i < lmb.memory.cnt - 1; i++) {
                lmb_region_max_pfn =
                        (lmb.memory.region[i].base >> PAGE_SHIFT) +
                        (lmb.memory.region[i].size >> PAGE_SHIFT);
                lmb_next_region_start_pfn =
                        lmb.memory.region[i+1].base >> PAGE_SHIFT;

                if (lmb_region_max_pfn < lmb_next_region_start_pfn)
                        register_nosave_region(lmb_region_max_pfn,
                                               lmb_next_region_start_pfn);
        }

        return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long total_ram = lmb_phys_mem_size();
        phys_addr_t top_of_ram = lmb_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
        unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
        unsigned long end = __fix_to_virt(FIX_HOLE);

        for (; v < end; v += PAGE_SIZE)
                map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);

        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
               (unsigned long long)top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (long int)((top_of_ram - total_ram) >> 20));
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

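/*
 * Hand all usable memory from bootmem over to the page allocator, count
 * the remaining reserved pages, and print the final memory banner.
 */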
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
        pg_data_t *pgdat;
        unsigned long i;
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

        num_physpages = lmb.memory.size >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk("freeing bootmem node %d\n", nid);
                        totalram_pages +=
                                free_all_bootmem_node(NODE_DATA(nid));
                }
        }
#else
        max_mapnr = max_pfn;
        totalram_pages += free_all_bootmem();
#endif
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
        }

        codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
        datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        struct page *page = pfn_to_page(pfn);
                        if (lmb_is_reserved(pfn << PAGE_SHIFT))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
                        __free_page(page);
                        totalhigh_pages++;
                        reservedpages--;
                }
                totalram_pages += totalhigh_pages;
                printk(KERN_DEBUG "High memory: %luk\n",
                       totalhigh_pages << (PAGE_SHIFT-10));
        }
#endif /* CONFIG_HIGHMEM */

        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
               (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
               num_physpages << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               bsssize >> 10,
               initsize >> 10);

        mem_init_done = 1;
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

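/*
 * Flush the data cache for this page out to memory and invalidate the
 * corresponding i-cache lines, making the page i-cache coherent.
 */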
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
        void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
        __flush_dcache_icache(start);
        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* On 8xx and 64-bit, there is no need to kmap since highmem is not supported */
        __flush_dcache_icache(page_address(page));
#else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address.  Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;
#endif
        unsigned long pfn = pte_pfn(pte);

        /* handle i-cache coherency */
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
            !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
            pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
                /* On 8xx, cache control instructions (particularly
                 * "dcbst" from flush_dcache_icache) fault as write
                 * operation if there is an unpopulated TLB entry
                 * for the address in question.  To work around that,
                 * we invalidate the TLB here, thus avoiding dcbst
                 * misbehaviour.
                 */
                _tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
                /* The _PAGE_USER test should really be _PAGE_EXEC, but
                 * older glibc versions execute some code from no-exec
                 * pages, which for now we are supporting.  If exec-only
                 * pages are ever implemented, this will have to change.
                 */
                if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
                    && !test_bit(PG_arch_1, &page->flags)) {
                        if (vma->vm_mm == current->active_mm) {
                                __flush_dcache_icache((void *) address);
                        } else
                                flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                }
        }

#ifdef CONFIG_PPC_STD_MMU
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(pte) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text.  We have to test
         * for regs NULL since init will get here first thing at boot.
         *
         * We also avoid filling the hash if not coming from a fault.
         */
        if (current->thread.regs == NULL)
                return;
        trap = TRAP(current->thread.regs);
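        /* On classic PowerPC, exception vector 0x400 is an instruction
         * storage interrupt (ISI) and 0x300 a data storage interrupt
         * (DSI); only those two mean we arrived here from a real page
         * fault.
         */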
        if (trap == 0x400)
                access |= _PAGE_EXEC;
        else if (trap != 0x300)
                return;
        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}