/*
 * linux/arch/arm/mm/fault-armv.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

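/*
 * Memory type forced onto additional shared mappings of a page when the
 * cache can alias.  It defaults to bufferable; check_writebuffer_bugs()
 * below downgrades it to fully uncached if the write buffer is found to
 * mishandle physical-address aliases.
 */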
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

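/*
 * The fix-ups below are only needed on pre-ARMv6 CPUs, whose data caches
 * are typically virtually indexed and virtually tagged (VIVT) and can
 * therefore hold multiple, inconsistent copies of a page that is mapped
 * at more than one virtual address.
 */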
#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn, pte_t *ptep)
{
        pte_t entry = *ptep;
        int ret;

        /*
         * If this page is present, it's actually being shared.
         */
        ret = pte_present(entry);

        /*
         * If this page isn't present, or is already set up to
         * fault (ie, is old), we can safely ignore any issues.
         */
        if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
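                /*
                 * Flush any cached data for this page (both the CPU cache
                 * and, via outer_flush_range(), the outer cache) before the
                 * mapping is downgraded, so no dirty data is stranded in a
                 * cache that the new mapping will bypass.
                 */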
                flush_cache_page(vma, address, pfn);
                outer_flush_range((pfn << PAGE_SHIFT),
                                  (pfn << PAGE_SHIFT) + PAGE_SIZE);
                pte_val(entry) &= ~L_PTE_MT_MASK;
                pte_val(entry) |= shared_pte_mask;
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_page(vma, address);
        }

        return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the PTE lock
 * here.  Otherwise we are using the shared mm->page_table_lock, which
 * is already locked, so we cannot take it again.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
        /*
         * Use nested version here to indicate that we are already
         * holding one similar spinlock.
         */
        spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
        spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn)
{
        spinlock_t *ptl;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret;

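        /*
         * Walk the page tables down to the PTE level; a missing or bad
         * entry at any level means there is no mapping here to fix up.
         */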
        pgd = pgd_offset(vma->vm_mm, address);
        if (pgd_none_or_clear_bad(pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                return 0;

        /*
         * This is called while another page table is mapped, so we
         * must use the nested version.  This also means we need to
         * open-code the spin-locking.
         */
        ptl = pte_lockptr(vma->vm_mm, pmd);
        pte = pte_offset_map(pmd, address);
        do_pte_lock(ptl);

        ret = do_adjust_pte(vma, address, pfn, pte);

        do_pte_unlock(ptl);
        pte_unmap(pte);

        return ret;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
        unsigned long addr, pte_t *ptep, unsigned long pfn)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *mpnt;
        unsigned long offset;
        pgoff_t pgoff;
        int aliases = 0;

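        /*
         * Convert the faulting address into a page offset within the
         * backing object, so other mappings of the same page can be
         * found in the address_space's interval tree.
         */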
        pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

        /*
         * If we have any shared mappings that are in the same mm
         * space, then we need to handle them specially to maintain
         * cache coherency.
         */
        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                /*
                 * If this VMA is not in our MM, we can ignore it.
                 * Note that we intentionally mask out the VMA
                 * that we are fixing up.
                 */
                if (mpnt->vm_mm != mm || mpnt == vma)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
        }
        flush_dcache_mmap_unlock(mapping);
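        /*
         * If any other mapping of this page had to be made uncacheable,
         * the PTE being installed here must be downgraded too, so that
         * every alias sees the same memory type.
         */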
        if (aliases)
                do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture-specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
        pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct address_space *mapping;
        struct page *page;

        if (!pfn_valid(pfn))
                return;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        page = pfn_to_page(pfn);
        if (page == ZERO_PAGE(0))
                return;

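        /*
         * Point 1 above: if the kernel mapping of this page may still
         * hold dirty cache lines (PG_dcache_clean was not yet set),
         * write them back so the user mapping being installed sees
         * up-to-date data.
         */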
        mapping = page_mapping(page);
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);
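        /*
         * Point 2 above: VIVT caches need the full aliasing fix-up in
         * make_coherent().  On non-VIVT caches D-cache aliasing is not
         * handled here; only executable mappings need the I-cache to be
         * brought back into coherency.
         */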
        if (mapping) {
                if (cache_is_vivt())
                        make_coherent(mapping, vma, addr, ptep, pfn);
                else if (vma->vm_flags & VM_EXEC)
                        __flush_icache_all();
        }
}
#endif /* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
        register unsigned long zero = 0, one = 1, val;

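        /*
         * p1 and p2 are two virtual mappings of the same physical page.
         * Write 1 through the first mapping, then 0 through the second,
         * and read the value back through the first.  If the write
         * buffer resolves the physical alias correctly the read returns
         * 0; a non-zero result means the work-around is needed.
         */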
        local_irq_disable();
        mb();
        *p1 = one;
        mb();
        *p2 = zero;
        mb();
        val = *p1;
        mb();
        local_irq_enable();
        return val != zero;
}

void __init check_writebuffer_bugs(void)
{
        struct page *page;
        const char *reason;
        unsigned long v = 1;

        pr_info("CPU: Testing write buffer coherency: ");

        page = alloc_page(GFP_KERNEL);
        if (page) {
                unsigned long *p1, *p2;
                pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
                                        L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

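                /*
                 * Map the same page at two kernel virtual addresses with
                 * the bufferable memory type, then run the write-buffer
                 * aliasing test through the two aliases.
                 */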
                p1 = vmap(&page, 1, VM_IOREMAP, prot);
                p2 = vmap(&page, 1, VM_IOREMAP, prot);

                if (p1 && p2) {
                        v = check_writebuffer(p1, p2);
                        reason = "enabling work-around";
                } else {
                        reason = "unable to map memory\n";
                }

                vunmap(p1);
                vunmap(p2);
                put_page(page);
        } else {
                reason = "unable to grab page\n";
        }

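        /*
         * If the test failed (or could not be run at all), fall back to
         * mapping shared pages fully uncached rather than merely
         * bufferable.
         */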
        if (v) {
                pr_cont("failed, %s\n", reason);
                shared_pte_mask = L_PTE_MT_UNCACHED;
        } else {
                pr_cont("ok\n");
        }
}