/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/kgdb.h>

extern void die(const char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;
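	/*
	 * Past this point we have user context: in_atomic() filters out
	 * interrupt and preempt-disabled contexts, and kernel threads
	 * (mm == NULL) have no user mappings to fault in.
	 */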

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
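	/*
	 * find_vma() returns the first vma that ends above the address,
	 * so the address may still lie below vma->vm_start; that is only
	 * legitimate for a VM_GROWSDOWN (stack) vma, which expand_stack()
	 * grows downwards to cover the faulting address.
	 */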
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
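	/*
	 * writeaccess is non-zero for write faults. Reads and instruction
	 * fetches are not distinguished here, so either VM_READ or
	 * VM_EXEC is enough to satisfy a non-write access.
	 */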
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
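	/*
	 * handle_mm_fault() of this vintage returns a single VM_FAULT_*
	 * code rather than a bitmask: MINOR means the fault was resolved
	 * without blocking on I/O, MAJOR means the page had to be brought
	 * in from backing store; both counters feed getrusage().
	 */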
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		force_sig(SIGSEGV, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
	asm volatile("mov.l %1, %0"
		     : "=r" (page)
		     : "m" (__m(MMU_TTB)));
	if (page) {
		page = ((unsigned long *)page)[address >> 22];
		printk(KERN_ALERT "*pde = %08lx\n", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
			printk(KERN_ALERT "*pte = %08lx\n", page);
		}
	}
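	/*
	 * The walk above mirrors the two-level page table layout: MMU_TTB
	 * holds the base of the current page table, bits 31..22 of the
	 * address index the 1024-entry pgd (4MB per entry), and bits
	 * 21..12 (the 0x003ff000 mask) index the pte level, assuming the
	 * usual 4K pages.
	 */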
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
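	/*
	 * PID 1 is init; killing it would take the whole system down, so
	 * yield to give reclaim a chance to make progress and then retry
	 * the fault.
	 */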
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
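	/* Trap number 14 apparently mirrors the i386 page-fault vector. */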
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX	(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX	P4SEG
#endif
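/*
 * 0x04000000 is 64MB: with store queues enabled, faults are also taken
 * on the store-queue remap area directly above P4SEG_STORE_QUE so its
 * pages can be mapped in on demand.
 */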

/*
 * Called with interrupts disabled.
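 *
 * This is the fast TLB-refill path: it only loads a pte that is
 * already valid into the TLB, and a non-zero return tells the
 * low-level fault code to fall back to the full do_page_fault()
 * slow path.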
 */
asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			       unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	struct mm_struct *mm;
	spinlock_t *ptl;
	int ret = 1;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
		mm = NULL;
	} else {
		if (unlikely(address >= TASK_SIZE || !(mm = current->mm)))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}
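	/*
	 * P3 (kernel) addresses are resolved against the kernel page
	 * table via pgd_offset_k(); everything else goes through the
	 * current task's mm. mm is left NULL in the kernel case as a
	 * marker, so the pte lock is skipped below.
	 */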

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;

	if (mm)
		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	else
		pte = pte_offset_kernel(pmd, address);
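	/*
	 * User ptes are mapped and read under the pte spinlock so a
	 * racing unmap cannot free the page table underneath us; kernel
	 * page tables are never reclaimed, so pte_offset_kernel() needs
	 * no lock.
	 */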

	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto unlock;
	if (unlikely(writeaccess && !pte_write(entry)))
		goto unlock;

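	/*
	 * Refresh the software-maintained state before reloading the
	 * TLB: mark the pte young (referenced), and dirty on a write,
	 * since the generic mm code relies on these bits for reclaim
	 * and writeback decisions.
	 */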
	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

#ifdef CONFIG_CPU_SH4
	/*
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
	__flush_tlb_page(get_asid(), address & PAGE_MASK);
#endif

	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);
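	/*
	 * update_mmu_cache() loads the new entry into the TLB; the vma
	 * argument appears to go unused on SH, hence the NULL.
	 */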
	ret = 0;
unlock:
	if (mm)
		pte_unmap_unlock(pte, ptl);
	return ret;
}