Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* $Id: init.c,v 1.103 2001/11/19 19:03:08 davem Exp $ |
2 | * linux/arch/sparc/mm/init.c | |
3 | * | |
4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | |
5 | * Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be) | |
6 | * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | |
7 | * Copyright (C) 2000 Anton Blanchard (anton@samba.org) | |
8 | */ | |
9 | ||
1da177e4 LT |
10 | #include <linux/module.h> |
11 | #include <linux/signal.h> | |
12 | #include <linux/sched.h> | |
13 | #include <linux/kernel.h> | |
14 | #include <linux/errno.h> | |
15 | #include <linux/string.h> | |
16 | #include <linux/types.h> | |
17 | #include <linux/ptrace.h> | |
18 | #include <linux/mman.h> | |
19 | #include <linux/mm.h> | |
20 | #include <linux/swap.h> | |
21 | #include <linux/initrd.h> | |
22 | #include <linux/init.h> | |
23 | #include <linux/highmem.h> | |
24 | #include <linux/bootmem.h> | |
25 | ||
26 | #include <asm/system.h> | |
1da177e4 LT |
27 | #include <asm/vac-ops.h> |
28 | #include <asm/page.h> | |
29 | #include <asm/pgtable.h> | |
30 | #include <asm/vaddrs.h> | |
31 | #include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */ | |
32 | #include <asm/tlb.h> | |
942a6bdd | 33 | #include <asm/prom.h> |
1da177e4 LT |
34 | |
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* One bit per 1MB chunk of physical address space; a set bit means the
 * chunk contains real RAM (populated by taint_real_pages(), sized and
 * allocated in mem_init()). */
unsigned long *sparc_valid_addr_bitmap;

/* Physical address and page frame number of the first byte of RAM. */
unsigned long phys_base;
unsigned long pfn_base;

unsigned long page_kernel;

/* Table of physical memory banks, terminated by an entry whose
 * num_bytes is zero. */
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
unsigned long sparc_unmapped_base;

struct pgtable_cache_struct pgt_quicklists;

/* References to section boundaries */
extern char __init_begin, __init_end, _start, _end, etext , edata;

/* Initial ramdisk setup (location/size handed over by the boot code) */
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

/* First/last pfn of high memory; set up in bootmem_init(). */
unsigned long highstart_pfn, highend_pfn;
57 | ||
/* Cached pte pointer and protection bits for the kmap fixmap area;
 * both are initialized once at boot by kmap_init(). */
pte_t *kmap_pte;
pgprot_t kmap_prot;

/* Walk the kernel page tables down to the pte that maps 'vaddr'. */
#define kmap_get_fixmap_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

/* One-time setup of the kmap machinery: remember where the first kmap
 * pte lives and which SRMMU protection bits kmap mappings should use. */
void __init kmap_init(void)
{
	/* cache the first kmap pte */
	kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
}
70 | ||
/* Print a summary of memory usage (free areas, swap, total RAM) to the
 * console. */
void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", totalram_pages);
	printk("%ld free pages\n", nr_free_pages());
#if 0 /* undefined pgtable_cache_size, pgd_cache_size */
	printk("%ld pages in page table cache\n",pgtable_cache_size);
#ifndef CONFIG_SMP
	if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
		printk("%ld entries in page dir cache\n",pgd_cache_size);
#endif
#endif
}
87 | ||
/* Allocate and initialize the pool of 'numctx' MMU context descriptors:
 * every entry is numbered, detached from any mm, and placed on the free
 * list; the used list starts out empty. */
void __init sparc_context_init(int numctx)
{
	int ctx;

	/* Boot-time allocation; the pool lives for the lifetime of the kernel. */
	ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL);

	for(ctx = 0; ctx < numctx; ctx++) {
		struct ctx_list *clist;

		clist = (ctx_list_pool + ctx);
		clist->ctx_number = ctx;
		clist->ctx_mm = NULL;	/* not owned by any address space yet */
	}
	/* Initialize both list heads as empty circular lists before any
	 * entry is linked in. */
	ctx_free.next = ctx_free.prev = &ctx_free;
	ctx_used.next = ctx_used.prev = &ctx_used;
	for(ctx = 0; ctx < numctx; ctx++)
		add_to_free_ctxlist(ctx_list_pool + ctx);
}
106 | ||
107 | extern unsigned long cmdline_memory_size; | |
108 | unsigned long last_valid_pfn; | |
109 | ||
110 | unsigned long calc_highpages(void) | |
111 | { | |
112 | int i; | |
113 | int nr = 0; | |
114 | ||
115 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | |
116 | unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; | |
117 | unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; | |
118 | ||
119 | if (end_pfn <= max_low_pfn) | |
120 | continue; | |
121 | ||
122 | if (start_pfn < max_low_pfn) | |
123 | start_pfn = max_low_pfn; | |
124 | ||
125 | nr += end_pfn - start_pfn; | |
126 | } | |
127 | ||
128 | return nr; | |
129 | } | |
130 | ||
/* Compute the highest usable low-memory pfn.  Start from the hard
 * ceiling pfn_base + SRMMU_MAXMEM pages; if the bank layout leaves a
 * hole just below that ceiling, clamp down to the end of the last bank
 * that still lies under it. */
unsigned long calc_max_low_pfn(void)
{
	int i;
	/* Absolute ceiling imposed by the SRMMU mapping size. */
	unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
	unsigned long curr_pfn, last_pfn;

	/* End pfn of bank 0; updated as we walk subsequent banks. */
	last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT;
	for (i = 1; sp_banks[i].num_bytes != 0; i++) {
		curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;

		if (curr_pfn >= tmp) {
			/* This bank starts at/above the ceiling: if the
			 * previous bank ended below it, low memory really
			 * stops at that earlier end. */
			if (last_pfn < tmp)
				tmp = last_pfn;
			break;
		}

		last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
	}

	return tmp;
}
152 | ||
/* Set up the boot-time memory allocator.
 *
 * Walks sp_banks[] to find the end of physical memory (honoring any
 * mem= command line limit), places the bootmem bitmap after the kernel
 * image (and after the initrd if it would collide), registers all
 * usable low memory with the allocator, then reserves the initrd, the
 * kernel image and the bootmem bitmap itself.
 *
 * On return *pages_avail holds the number of page frames handed to the
 * bootmem allocator (minus reservations); the return value is the
 * highest pfn of physical memory.
 */
unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

	/* Scan the banks, accumulating total RAM and tracking the end of
	 * the highest bank.  A mem= limit truncates the bank it falls
	 * into and terminates the table there. */
	bytes_avail = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		bytes_avail += sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				sp_banks[i].num_bytes -= slack;
				if (sp_banks[i].num_bytes == 0) {
					sp_banks[i].base_addr = 0xdeadbeef;
				} else {
					/* Terminate the table after the truncated bank. */
					sp_banks[i+1].num_bytes = 0;
					sp_banks[i+1].base_addr = 0xdeadbeef;
				}
				break;
			}
		}
	}

	/* Start with page aligned address of last symbol in kernel
	 * image.
	 */
	start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));

	/* Now shift down to get the real physical page frame number. */
	start_pfn >>= PAGE_SHIFT;

	/* Default location for the bootmem bitmap: right after the kernel. */
	bootmap_pfn = start_pfn;

	max_pfn = end_of_phys_memory >> PAGE_SHIFT;

	max_low_pfn = max_pfn;
	highstart_pfn = highend_pfn = max_pfn;

	/* If RAM extends past what the SRMMU can map directly, split the
	 * rest off as high memory. */
	if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {
		highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
		max_low_pfn = calc_max_low_pfn();
		printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		       calc_highpages() >> (20 - PAGE_SHIFT));
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image) {
		sparc_ramdisk_image -= KERNBASE;
		initrd_start = sparc_ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
			       "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		if (initrd_start) {
			/* If the initrd sits where the bitmap would go,
			 * move the bitmap past the initrd. */
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif
	/* Initialize the boot-time allocator. */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base,
					 max_low_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	*pages_avail = 0;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long curr_pfn, last_pfn;

		curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
		if (curr_pfn >= max_low_pfn)
			break;

		last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = (last_pfn - curr_pfn) << PAGE_SHIFT;
		*pages_avail += last_pfn - curr_pfn;

		free_bootmem(sp_banks[i].base_addr, size);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		/* Reserve the initrd image area. */
		size = initrd_end - initrd_start;
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		/* Convert initrd bounds from physical to kernel virtual. */
		initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
		initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
	size = (start_pfn << PAGE_SHIFT) - phys_base;
	reserve_bootmem(phys_base, size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* Reserve the bootmem map. We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return max_pfn;
}
283 | ||
/*
 * check_pgt_cache
 *
 * This is called at the end of unmapping of VMA (zap_page_range),
 * to rescan the page cache for architecture specific things,
 * presumably something like sun4/sun4c PMEGs. Most architectures
 * define check_pgt_cache empty.
 *
 * We simply copy the 2.4 implementation for now.
 */
/* Low/high water marks handed to do_check_pgt_cache(). */
int pgt_cache_water[2] = { 25, 50 };

void check_pgt_cache(void)
{
	do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
}
300 | ||
301 | /* | |
302 | * paging_init() sets up the page tables: We call the MMU specific | |
303 | * init routine based upon the Sun model type on the Sparc. | |
304 | * | |
305 | */ | |
306 | extern void sun4c_paging_init(void); | |
307 | extern void srmmu_paging_init(void); | |
308 | extern void device_scan(void); | |
309 | ||
378e515c AV |
310 | pgprot_t PAGE_SHARED __read_mostly; |
311 | EXPORT_SYMBOL(PAGE_SHARED); | |
312 | ||
1da177e4 LT |
313 | void __init paging_init(void) |
314 | { | |
315 | switch(sparc_cpu_model) { | |
316 | case sun4c: | |
317 | case sun4e: | |
318 | case sun4: | |
319 | sun4c_paging_init(); | |
320 | sparc_unmapped_base = 0xe0000000; | |
321 | BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000); | |
322 | break; | |
323 | case sun4m: | |
324 | case sun4d: | |
325 | srmmu_paging_init(); | |
326 | sparc_unmapped_base = 0x50000000; | |
327 | BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000); | |
328 | break; | |
329 | default: | |
330 | prom_printf("paging_init: Cannot init paging on this Sparc\n"); | |
331 | prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model); | |
332 | prom_printf("paging_init: Halting...\n"); | |
333 | prom_halt(); | |
334 | }; | |
335 | ||
336 | /* Initialize the protection map with non-constant, MMU dependent values. */ | |
337 | protection_map[0] = PAGE_NONE; | |
338 | protection_map[1] = PAGE_READONLY; | |
339 | protection_map[2] = PAGE_COPY; | |
340 | protection_map[3] = PAGE_COPY; | |
341 | protection_map[4] = PAGE_READONLY; | |
342 | protection_map[5] = PAGE_READONLY; | |
343 | protection_map[6] = PAGE_COPY; | |
344 | protection_map[7] = PAGE_COPY; | |
345 | protection_map[8] = PAGE_NONE; | |
346 | protection_map[9] = PAGE_READONLY; | |
347 | protection_map[10] = PAGE_SHARED; | |
348 | protection_map[11] = PAGE_SHARED; | |
349 | protection_map[12] = PAGE_READONLY; | |
350 | protection_map[13] = PAGE_READONLY; | |
351 | protection_map[14] = PAGE_SHARED; | |
352 | protection_map[15] = PAGE_SHARED; | |
353 | btfixup(); | |
942a6bdd | 354 | prom_build_devicetree(); |
1da177e4 LT |
355 | device_scan(); |
356 | } | |
357 | ||
358 | struct cache_palias *sparc_aliases; | |
359 | ||
360 | static void __init taint_real_pages(void) | |
361 | { | |
362 | int i; | |
363 | ||
364 | for (i = 0; sp_banks[i].num_bytes; i++) { | |
365 | unsigned long start, end; | |
366 | ||
367 | start = sp_banks[i].base_addr; | |
368 | end = start + sp_banks[i].num_bytes; | |
369 | ||
370 | while (start < end) { | |
371 | set_bit(start >> 20, sparc_valid_addr_bitmap); | |
372 | start += PAGE_SIZE; | |
373 | } | |
374 | } | |
375 | } | |
376 | ||
377 | void map_high_region(unsigned long start_pfn, unsigned long end_pfn) | |
378 | { | |
379 | unsigned long tmp; | |
380 | ||
381 | #ifdef CONFIG_DEBUG_HIGHMEM | |
382 | printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn); | |
383 | #endif | |
384 | ||
385 | for (tmp = start_pfn; tmp < end_pfn; tmp++) { | |
386 | struct page *page = pfn_to_page(tmp); | |
387 | ||
388 | ClearPageReserved(page); | |
7835e98b | 389 | init_page_count(page); |
1da177e4 LT |
390 | __free_page(page); |
391 | totalhigh_pages++; | |
392 | } | |
393 | } | |
394 | ||
/* Final memory-subsystem bring-up: sanity-check the pkmap/fixmap
 * layout, build the valid-address bitmap, release all bootmem pages to
 * the buddy allocator, free the highmem banks, and print the memory
 * summary banner. */
void __init mem_init(void)
{
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
	int reservedpages = 0;
	int i;

	/* The pkmap window must end below the fixmap area. */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		prom_printf("BUG: fixmap and pkmap areas overlap\n");
		prom_printf("pkbase: 0x%lx pkend: 0x%lx fixstart 0x%lx\n",
			    PKMAP_BASE,
			    (unsigned long)PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
			    FIXADDR_START);
		prom_printf("Please mail sparclinux@vger.kernel.org.\n");
		prom_halt();
	}


	/* Saves us work later. */
	memset((void *)&empty_zero_page, 0, PAGE_SIZE);

	/* Size the valid-address bitmap: one bit per 1MB of address
	 * space.  (20 - PAGE_SHIFT) converts a pfn to a MB index, the
	 * extra 5 divides by 32 bits per word; i << 2 is then bytes. */
	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
	i += 1;
	sparc_valid_addr_bitmap = (unsigned long *)
		__alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL);

	if (sparc_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc_valid_addr_bitmap, 0, i << 2);

	/* Mark which 1MB chunks actually contain RAM. */
	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(max_low_pfn << PAGE_SHIFT);

	/* Hand everything the bootmem allocator still owns to the buddy
	 * allocator. */
	totalram_pages = free_all_bootmem();

	/* Free the highmem portion of every bank. */
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
		unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;

		num_physpages += sp_banks[i].num_bytes >> PAGE_SHIFT;

		if (end_pfn <= highstart_pfn)
			continue;

		if (start_pfn < highstart_pfn)
			start_pfn = highstart_pfn;

		map_high_region(start_pfn, end_pfn);
	}

	totalram_pages += totalhigh_pages;

	/* Page counts for the banner below. */
	codepages = (((unsigned long) &etext) - ((unsigned long)&_start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) &edata) - ((unsigned long)&etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	/* Ignore memory holes for the purpose of counting reserved pages */
	for (i=0; i < max_low_pfn; i++)
		if (test_bit(i >> (20 - PAGE_SHIFT), sparc_valid_addr_bitmap)
		    && PageReserved(pfn_to_page(i)))
			reservedpages++;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT - 10),
	       codepages << (PAGE_SHIFT-10),
	       reservedpages << (PAGE_SHIFT - 10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       totalhigh_pages << (PAGE_SHIFT-10));
}
474 | ||
475 | void free_initmem (void) | |
476 | { | |
477 | unsigned long addr; | |
478 | ||
479 | addr = (unsigned long)(&__init_begin); | |
480 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | |
481 | struct page *p; | |
482 | ||
483 | p = virt_to_page(addr); | |
484 | ||
485 | ClearPageReserved(p); | |
7835e98b | 486 | init_page_count(p); |
1da177e4 LT |
487 | __free_page(p); |
488 | totalram_pages++; | |
489 | num_physpages++; | |
490 | } | |
491 | printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10); | |
492 | } | |
493 | ||
#ifdef CONFIG_BLK_DEV_INITRD
/* Hand the pages of the initial ramdisk image back to the page
 * allocator once its contents are no longer needed. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if (start < end)
		printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);

		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		num_physpages++;
	}
}
#endif
509 | ||
/* Flush a page's cached contents back to memory, but only when the
 * page currently has a kernel virtual address (page_address() returns
 * NULL otherwise). */
void sparc_flush_page_to_ram(struct page *page)
{
	unsigned long kaddr = (unsigned long)page_address(page);

	if (!kaddr)
		return;
	__flush_page_to_ram(kaddr);
}