/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>

#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

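/*
 * Software context (ASID) management state, in the style of arch/ppc:
 * a bitmap of live contexts, the next context number to try, a count
 * of free contexts, and a reverse map from context number to mm.
 */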
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT + 1];
unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) alloc_bootmem_pages(size);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

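	/*
	 * Map all physical RAM linearly at PAGE_OFFSET, handing out one
	 * bootmem-allocated page table per pgd slot as we go.
	 */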
	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}
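
/*
 * Software TLB-miss handler: walk the page tables for the faulting
 * address and, when a valid pte is found, load it into the hardware
 * TLB.  Returns 0 on success, or -1 to hand the fault to the generic
 * fault handler.
 */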
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

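	/*
	 * A DTLB miss latches the faulting address in the MMUAR register;
	 * for an ITLB miss it is computed from the faulting PC plus the
	 * extension-word offset.
	 */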
	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

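	/*
	 * Walk the software page tables; a missing level means the miss
	 * cannot be satisfied here and is left to the generic fault path.
	 */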
	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

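	/*
	 * The ColdFire MMU has no hardware dirty or accessed bits, so
	 * emulate them in software: mark the pte dirty on a permitted
	 * write, young on any access, and keep clean user pages
	 * write-protected so the next write faults back in here.
	 */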
	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

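	/*
	 * Load the translation into the hardware TLB: MMUTR carries the
	 * virtual page, ASID and valid bit, MMUDR the matching physical
	 * page plus permission and size bits, and the write to MMUOR
	 * commits the entry to the ITLB or DTLB.
	 */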
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}
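
/*
 * Record the board's RAM layout for the m68k mm code and hand the
 * memory above the kernel image (_ramstart) to the bootmem allocator.
 */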
void __init cf_bootmem_alloc(void)
{
	unsigned long start_pfn;
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page numbers */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	start_pfn = PFN_DOWN(memstart);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

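	/* a shift dividing the RAM range into at most 64 virt-to-node chunks */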
	m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup bootmem data */
	m68k_setup_node(0);
	memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
		min_low_pfn, max_low_pfn);
	free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP.  If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;

	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}