/*
 * linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
					global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
		global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		/* <pfn,flags> stored as-is, to permit clearing entries */
		set_pte(pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
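
/*
 * On i386 the pud level (and, without PAE, the pmd level as well) is
 * folded into the pgd, so the pud_offset()/pmd_offset() steps above
 * compile away to nothing; walking all the levels keeps this code
 * correct against the generic four-level pagetable API.
 */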
110 | ||
111 | /* | |
112 | * Associate a large virtual page frame with a given physical page frame | |
113 | * and protection flags for that frame. pfn is for the base of the page, | |
114 | * vaddr is what the page gets mapped to - both must be properly aligned. | |
115 | * The pmd must already be instantiated. Assumes PAE mode. | |
116 | */ | |
117 | void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) | |
118 | { | |
119 | pgd_t *pgd; | |
120 | pud_t *pud; | |
121 | pmd_t *pmd; | |
122 | ||
123 | if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */ | |
f90e7185 | 124 | printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n"); |
1da177e4 LT |
125 | return; /* BUG(); */ |
126 | } | |
127 | if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */ | |
f90e7185 | 128 | printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n"); |
1da177e4 LT |
129 | return; /* BUG(); */ |
130 | } | |
131 | pgd = swapper_pg_dir + pgd_index(vaddr); | |
132 | if (pgd_none(*pgd)) { | |
f90e7185 | 133 | printk(KERN_WARNING "set_pmd_pfn: pgd_none\n"); |
1da177e4 LT |
134 | return; /* BUG(); */ |
135 | } | |
136 | pud = pud_offset(pgd, vaddr); | |
137 | pmd = pmd_offset(pud, vaddr); | |
138 | set_pmd(pmd, pfn_pmd(pfn, flags)); | |
139 | /* | |
140 | * It's enough to flush this one mapping. | |
141 | * (PGE mappings get flushed as well) | |
142 | */ | |
143 | __flush_tlb_one(vaddr); | |
144 | } | |
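
/*
 * Sketch of a hypothetical caller: with PAE, mapping the 2MB physical
 * frame starting at paddr onto a 2MB-aligned vaddr might look like
 *
 *	set_pmd_pfn(vaddr, paddr >> PAGE_SHIFT, PAGE_KERNEL_LARGE);
 *
 * where PAGE_KERNEL_LARGE (assuming this tree provides it) is a kernel
 * pgprot with _PAGE_PSE set.
 */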
145 | ||
052e7994 | 146 | static int fixmaps; |
052e7994 JF |
147 | unsigned long __FIXADDR_TOP = 0xfffff000; |
148 | EXPORT_SYMBOL(__FIXADDR_TOP); | |
052e7994 | 149 | |
1da177e4 LT |
150 | void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags) |
151 | { | |
152 | unsigned long address = __fix_to_virt(idx); | |
153 | ||
154 | if (idx >= __end_of_fixed_addresses) { | |
155 | BUG(); | |
156 | return; | |
157 | } | |
158 | set_pte_pfn(address, phys >> PAGE_SHIFT, flags); | |
052e7994 JF |
159 | fixmaps++; |
160 | } | |
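
/*
 * Callers normally go through the set_fixmap()/set_fixmap_nocache()
 * wrappers in <asm/fixmap.h>, which supply PAGE_KERNEL or
 * PAGE_KERNEL_NOCACHE as the protection.
 */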
161 | ||
162 | /** | |
163 | * reserve_top_address - reserves a hole in the top of kernel address space | |
164 | * @reserve - size of hole to reserve | |
165 | * | |
166 | * Can be used to relocate the fixmap area and poke a hole in the top | |
167 | * of kernel address space to make room for a hypervisor. | |
168 | */ | |
169 | void reserve_top_address(unsigned long reserve) | |
170 | { | |
171 | BUG_ON(fixmaps > 0); | |
7ce0bcfd ZA |
172 | printk(KERN_INFO "Reserving virtual address space above 0x%08x\n", |
173 | (int)-reserve); | |
052e7994 JF |
174 | __FIXADDR_TOP = -reserve - PAGE_SIZE; |
175 | __VMALLOC_RESERVE += reserve; | |
1da177e4 LT |
176 | } |
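
/*
 * For example, a hypervisor that wants the top 64MB of virtual address
 * space to itself could call reserve_top_address(64 << 20) early in
 * boot; the BUG_ON above ensures this happens before any fixmap entry
 * has been installed at the old location.
 */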
177 | ||
178 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | |
179 | { | |
180 | return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | |
181 | } | |
182 | ||
183 | struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) | |
184 | { | |
185 | struct page *pte; | |
186 | ||
187 | #ifdef CONFIG_HIGHPTE | |
188 | pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0); | |
189 | #else | |
190 | pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); | |
191 | #endif | |
192 | return pte; | |
193 | } | |
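
/*
 * With CONFIG_HIGHPTE, user pagetable pages may sit in highmem: they
 * are only ever accessed through short-lived kmap_atomic() mappings
 * (pte_offset_map() and friends), so __GFP_HIGHMEM is safe here.
 * Kernel pagetables (pte_alloc_one_kernel) must stay in lowmem, since
 * they are referenced through their permanent linear-map address.
 */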
194 | ||
e18b890b | 195 | void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags) |
1da177e4 LT |
196 | { |
197 | memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t)); | |
198 | } | |
199 | ||
200 | /* | |
201 | * List of all pgd's needed for non-PAE so it can invalidate entries | |
202 | * in both cached and uncached pgd's; not needed for PAE since the | |
203 | * kernel pmd is shared. If PAE were not to share the pmd a similar | |
204 | * tactic would be needed. This is essentially codepath-based locking | |
205 | * against pageattr.c; it is the unique case in which a valid change | |
206 | * of kernel pagetables can't be lazily synchronized by vmalloc faults. | |
207 | * vmalloc faults work because attached pagetables are never freed. | |
208 | * The locking scheme was chosen on the basis of manfred's | |
209 | * recommendations and having no core impact whatsoever. | |
210 | * -- wli | |
211 | */ | |
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long)pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long)&page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *)page->index;
	pprev = (struct page **)page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long)pprev);
}
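
/*
 * The list above is intrusive and doubly linked through struct page
 * itself: page->index holds the next node, while page_private() points
 * back at whatever holds the pointer to this node (the previous node's
 * index field, or pgd_list itself for the head).  That back-pointer is
 * what lets pgd_list_del() unlink without walking the list.
 */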
234 | ||
5311ab62 JF |
235 | #if (PTRS_PER_PMD == 1) |
236 | /* Non-PAE pgd constructor */ | |
e18b890b | 237 | void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused) |
1da177e4 LT |
238 | { |
239 | unsigned long flags; | |
240 | ||
5311ab62 JF |
241 | /* !PAE, no pagetable sharing */ |
242 | memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); | |
243 | ||
244 | spin_lock_irqsave(&pgd_lock, flags); | |
1da177e4 | 245 | |
5311ab62 | 246 | /* must happen under lock */ |
d7271b14 | 247 | clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD, |
1da177e4 | 248 | swapper_pg_dir + USER_PTRS_PER_PGD, |
d7271b14 | 249 | KERNEL_PGD_PTRS); |
c119ecce | 250 | paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT, |
5311ab62 JF |
251 | __pa(swapper_pg_dir) >> PAGE_SHIFT, |
252 | USER_PTRS_PER_PGD, | |
253 | KERNEL_PGD_PTRS); | |
1da177e4 LT |
254 | pgd_list_add(pgd); |
255 | spin_unlock_irqrestore(&pgd_lock, flags); | |
1da177e4 | 256 | } |
5311ab62 JF |
257 | #else /* PTRS_PER_PMD > 1 */ |
258 | /* PAE pgd constructor */ | |
259 | void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused) | |
260 | { | |
261 | /* PAE, kernel PMD may be shared */ | |
262 | ||
263 | if (SHARED_KERNEL_PMD) { | |
264 | clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD, | |
265 | swapper_pg_dir + USER_PTRS_PER_PGD, | |
266 | KERNEL_PGD_PTRS); | |
267 | } else { | |
268 | unsigned long flags; | |
269 | ||
270 | memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); | |
271 | spin_lock_irqsave(&pgd_lock, flags); | |
272 | pgd_list_add(pgd); | |
273 | spin_unlock_irqrestore(&pgd_lock, flags); | |
274 | } | |
275 | } | |
276 | #endif /* PTRS_PER_PMD */ | |
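
/*
 * SHARED_KERNEL_PMD is normally 1 on PAE; it can be 0 under paravirt
 * (e.g. Xen), in which case every pgd carries its own copy of the
 * kernel pmds and must sit on pgd_list so that kernel mapping changes
 * can be propagated to it, just as in the non-PAE case.
 */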

void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	BUG_ON(SHARED_KERNEL_PMD);

	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
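
/*
 * The destructor is only meaningful when pgds actually live on
 * pgd_list (SHARED_KERNEL_PMD == 0); pgtable_cache_init() is expected
 * to register it for pgd_cache only in that configuration, which is
 * what the BUG_ON above asserts.
 */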
289 | ||
5311ab62 JF |
290 | #define UNSHARED_PTRS_PER_PGD \ |
291 | (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD) | |
292 | ||
293 | /* If we allocate a pmd for part of the kernel address space, then | |
294 | make sure its initialized with the appropriate kernel mappings. | |
295 | Otherwise use a cached zeroed pmd. */ | |
296 | static pmd_t *pmd_cache_alloc(int idx) | |
297 | { | |
298 | pmd_t *pmd; | |
299 | ||
300 | if (idx >= USER_PTRS_PER_PGD) { | |
301 | pmd = (pmd_t *)__get_free_page(GFP_KERNEL); | |
302 | ||
303 | if (pmd) | |
304 | memcpy(pmd, | |
305 | (void *)pgd_page_vaddr(swapper_pg_dir[idx]), | |
306 | sizeof(pmd_t) * PTRS_PER_PMD); | |
307 | } else | |
308 | pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); | |
309 | ||
310 | return pmd; | |
311 | } | |
312 | ||
313 | static void pmd_cache_free(pmd_t *pmd, int idx) | |
314 | { | |
315 | if (idx >= USER_PTRS_PER_PGD) | |
316 | free_page((unsigned long)pmd); | |
317 | else | |
318 | kmem_cache_free(pmd_cache, pmd); | |
319 | } | |
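
/*
 * With a shared kernel pmd, only the USER_PTRS_PER_PGD user slots get
 * per-process pmds (3 of the 4 PAE pgd entries under the default 3G/1G
 * split); unshared, all PTRS_PER_PGD entries do, and the kernel-range
 * ones are seeded from swapper_pg_dir by pmd_cache_alloc() above.
 */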
320 | ||
1da177e4 LT |
321 | pgd_t *pgd_alloc(struct mm_struct *mm) |
322 | { | |
323 | int i; | |
324 | pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL); | |
325 | ||
326 | if (PTRS_PER_PMD == 1 || !pgd) | |
327 | return pgd; | |
328 | ||
5311ab62 JF |
329 | for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) { |
330 | pmd_t *pmd = pmd_cache_alloc(i); | |
331 | ||
1da177e4 LT |
332 | if (!pmd) |
333 | goto out_oom; | |
5311ab62 | 334 | |
c119ecce | 335 | paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT); |
1da177e4 LT |
336 | set_pgd(&pgd[i], __pgd(1 + __pa(pmd))); |
337 | } | |
338 | return pgd; | |
339 | ||
340 | out_oom: | |
c119ecce ZA |
341 | for (i--; i >= 0; i--) { |
342 | pgd_t pgdent = pgd[i]; | |
343 | void* pmd = (void *)__va(pgd_val(pgdent)-1); | |
344 | paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT); | |
5311ab62 | 345 | pmd_cache_free(pmd, i); |
c119ecce | 346 | } |
1da177e4 LT |
347 | kmem_cache_free(pgd_cache, pgd); |
348 | return NULL; | |
349 | } | |
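
/*
 * The "1 +" in set_pgd() above sets bit 0 of the entry, i.e.
 * _PAGE_PRESENT; the matching "-1" in the teardown paths strips it off
 * again so that __va() sees the pmd's bare physical address.
 */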
350 | ||
351 | void pgd_free(pgd_t *pgd) | |
352 | { | |
353 | int i; | |
354 | ||
355 | /* in the PAE case user pgd entries are overwritten before usage */ | |
356 | if (PTRS_PER_PMD > 1) | |
5311ab62 | 357 | for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) { |
c119ecce ZA |
358 | pgd_t pgdent = pgd[i]; |
359 | void* pmd = (void *)__va(pgd_val(pgdent)-1); | |
360 | paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT); | |
5311ab62 | 361 | pmd_cache_free(pmd, i); |
c119ecce | 362 | } |
e0da382c | 363 | /* in the non-PAE case, free_pgtables() clears user pgd entries */ |
1da177e4 LT |
364 | kmem_cache_free(pgd_cache, pgd); |
365 | } |