/*
 * linux/arch/m68k/mm/sun3mmu.c
 *
 * Implementations of mm routines specific to the sun3 MMU.
 *
 * Moved here 8/20/1999 Sam Creasey
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/io.h>

extern void mmu_emu_init(unsigned long bootmem_end);

const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";

extern unsigned long num_pages;

/* For the sun3 we try to follow the i386 paging_init() more closely */
/* start_mem and end_mem have PAGE_OFFSET added already */
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	int i;
	unsigned long address;
	unsigned long next_pgtable;
	unsigned long bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long size;

#ifdef TEST_VERIFY_AREA
	wp_works_ok = 0;
#endif
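	/* Allocate the page of zeroes that backs ZERO_PAGE(). */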
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);

	address = PAGE_OFFSET;
	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
	memset(kernel_pg_dir, 0, sizeof(kernel_pg_dir));

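	/*
	 * Reserve one contiguous bootmem block big enough to hold a PTE for
	 * every physical page, rounded up to whole pages.  bootmem_end is
	 * the page-aligned end of that block and is handed to mmu_emu_init()
	 * below.
	 */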
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);

	next_pgtable = (unsigned long)alloc_bootmem_pages(size);
	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;

	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

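	/*
	 * Carve that block up into page tables: each pgd slot gets the
	 * physical address of one table of PTRS_PER_PTE entries, and every
	 * page below high_memory is mapped with PAGE_INIT protection;
	 * entries past high_memory are cleared.
	 */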
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) __pa(next_pgtable);
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		pg_table = (pte_t *) __va((unsigned long) pg_table);
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);

			if (address >= (unsigned long)high_memory)
				pte_val(pte) = 0;
			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

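	/*
	 * Initialize the sun3 MMU emulation layer, telling it where the
	 * boot-time page table allocation above ends.
	 */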
	mmu_emu_init(bootmem_end);

	current->mm = NULL;

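	/* All of sun3's memory is handed to the page allocator as ZONE_DMA. */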
	/* memory sizing is a hack stolen from motorola.c.. hope it works for us */
	zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	/* I really wish I knew why the following change made things better... -- Sam */
/*	free_area_init(zones_size); */
	free_area_init_node(0, zones_size,
			    (__pa(PAGE_OFFSET) >> PAGE_SHIFT) + 1, NULL);
}