[PATCH] remove set_page_count() outside mm/
[deliverable/linux.git] arch/frv/mm/init.c
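The listing below shows arch/frv/mm/init.c as touched by this patch. The subject says set_page_count() is no longer called outside mm/; in this file the affected lines are the init_page_count() calls in mem_init(), free_initmem() and free_initrd_mem(). As a rough, illustrative sketch (not a quote of the actual hunks), the per-line conversion looks like this:

        /* old style: force the reference count of a page being handed
         * back to the allocator to 1 with the generic setter */
        ClearPageReserved(page);
        set_page_count(page, 1);        /* removed outside mm/ by this patch */
        __free_page(page);

        /* new style: use the dedicated helper for the first, initialising
         * reference on a page */
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);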
/* init.c: memory initialisation for FRV
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Derived from:
 *  - linux/arch/m68knommu/mm/init.c
 *    - Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, Kenneth Albanowski <kjahds@kjahds.com>,
 *    - Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
 *  - linux/arch/m68k/mm/init.c
 *    - Copyright (C) 1995 Hamish Macdonald
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/virtconvert.h>
#include <asm/sections.h>
#include <asm/tlb.h>

#undef DEBUG

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused, etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
static unsigned long empty_bad_page_table;
static unsigned long empty_bad_page;
unsigned long empty_zero_page;

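/* the ZERO_PAGE mentioned above is backed by empty_zero_page; the
 * ZERO_PAGE() macro in the FRV <asm/pgtable.h> is assumed to resolve to
 * it along the lines of (illustrative only, check the header for the
 * real definition):
 *
 *	#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)
 */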
/*****************************************************************************/
/*
 * display a summary of memory usage: total, free, reserved and shared pages
 */
void show_mem(void)
{
        unsigned long i;
        int free = 0, total = 0, reserved = 0, shared = 0;

        printk("\nMem-info:\n");
        show_free_areas();
        i = max_mapnr;
        while (i-- > 0) {
                struct page *page = &mem_map[i];

                total++;
                if (PageReserved(page))
                        reserved++;
                else if (!page_count(page))
                        free++;
                else
                        shared += page_count(page) - 1;
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);

} /* end show_mem() */

/*****************************************************************************/
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S: it sets up the zone sizes and
 * hands the allocatable pages to the page allocator.
 */
void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};

        /* allocate some pages for kernel housekeeping tasks */
        empty_bad_page_table = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
        empty_bad_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
        empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);

        memset((void *) empty_zero_page, 0, PAGE_SIZE);

#ifdef CONFIG_HIGHMEM
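        /* pages above num_mappedpages have no permanent kernel mapping; set up
         * the pkmap page table that kmap() will use to map them at PKMAP_BASE
         */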
        if (num_physpages - num_mappedpages) {
                pgd_t *pge;
                pud_t *pue;
                pmd_t *pme;

                pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE);

                memset(pkmap_page_table, 0, PAGE_SIZE);

                pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE);
                pue = pud_offset(pge, PKMAP_BASE);
                pme = pmd_offset(pue, PKMAP_BASE);
                __set_pmd(pme, virt_to_phys(pkmap_page_table) | _PAGE_TABLE);
        }
#endif

        /* distribute the allocatable pages across the various zones and pass
         * them to the allocator
         */
        zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
        zones_size[ZONE_NORMAL] = 0;
#ifdef CONFIG_HIGHMEM
        zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
#endif
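        /* illustrative example: with 256MiB of RAM of which 64MiB is covered
         * by the kernel's permanent mapping, ZONE_DMA receives the 64MiB of
         * mapped pages and ZONE_HIGHMEM the remaining 192MiB (figures made up)
         */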

        free_area_init(zones_size);

#ifdef CONFIG_MMU
        /* initialise init's MMU context */
        init_new_context(&init_task, &init_mm);
#endif

} /* end paging_init() */

/*****************************************************************************/
/*
 * release the bootmem-managed pages to the page allocator and print a
 * summary of the memory available
 */
void __init mem_init(void)
{
        unsigned long npages = (memory_end - memory_start) >> PAGE_SHIFT;
        unsigned long tmp;
#ifdef CONFIG_MMU
        unsigned long loop, pfn;
        int datapages = 0;
#endif
        int codek = 0, datak = 0;

        /* this will put all memory onto the freelists */
        totalram_pages = free_all_bootmem();

#ifdef CONFIG_MMU
        for (loop = 0 ; loop < npages ; loop++)
                if (PageReserved(&mem_map[loop]))
                        datapages++;

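        /* high memory pages were marked reserved by the bootmem allocator;
         * hand each one to the page allocator by clearing the reserved bit,
         * giving it an initial reference count and then freeing it
         */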
#ifdef CONFIG_HIGHMEM
        for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) {
                struct page *page = &mem_map[pfn];

                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                totalram_pages++;
        }
#endif

        codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10;
        datak = datapages << (PAGE_SHIFT - 10);

#else
        codek = (_etext - _stext) >> 10;
        datak = 0; //(_ebss - _sdata) >> 10;
#endif

        tmp = nr_free_pages() << PAGE_SHIFT;
        printk("Memory available: %luKiB/%luKiB RAM, %luKiB/%luKiB ROM (%dKiB kernel code, %dKiB data)\n",
               tmp >> 10,
               npages << (PAGE_SHIFT - 10),
               (rom_length > 0) ? ((rom_length >> 10) - codek) : 0,
               rom_length >> 10,
               codek,
               datak
               );

} /* end mem_init() */

/*****************************************************************************/
/*
 * free the memory that was only required for initialisation
 */
void __init free_initmem(void)
{
#if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL)
        unsigned long start, end, addr;

        start = PAGE_ALIGN((unsigned long) &__init_begin);  /* round up */
        end = ((unsigned long) &__init_end) & PAGE_MASK;    /* round down */

        /* free each whole page in the init section; the rounding above
         * ensures we never free a partial page */
        for (addr = start; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }

        printk("Freeing unused kernel memory: %ldKiB freed (0x%lx - 0x%lx)\n",
               (end - start) >> 10, start, end);
#endif
} /* end free_initmem() */

/*****************************************************************************/
/*
 * free the initial ramdisk memory
 */
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        int pages = 0;
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
                pages++;
        }
        printk("Freeing initrd memory: %dKiB freed\n", (pages * PAGE_SIZE) >> 10);
} /* end free_initrd_mem() */
#endif