/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

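/*
 * Map the page at a kernel alias whose cache colour matches the user
 * address vaddr, so that on an aliasing VIPT D-cache the alias hits
 * the same cache lines as the userspace mapping, then flush it.
 */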
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

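	/*
	 * Clean and invalidate the D-cache over the alias page (CP15
	 * c7/c14 range operation), then drain the write buffer.
	 */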
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

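/*
 * As above, but make the I- and D-caches coherent for a sub-page
 * range: map the colour-matched alias and run the standard
 * flush_icache_range() over it.
 */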
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

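/*
 * A VIVT cache is tagged by virtual address, so tearing down an entire
 * mm means flushing its whole VM view.  On an aliasing VIPT D-cache we
 * cannot tell which lines belong to this mm, so the entire D-cache is
 * cleaned and invalidated instead.
 */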
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

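	/*
	 * CP15 c7, c14, 0: clean and invalidate entire D-cache, then
	 * c7, c10, 4: drain the write buffer.
	 */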
	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

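/*
 * As flush_cache_mm(), plus an I-cache invalidate when the flushed
 * range contained executable mappings.
 */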
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

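/*
 * Flush the user mapping of a single page.  An aliasing VIPT D-cache
 * needs only the colour-matched alias flushed; an ASID-tagged VIVT
 * I-cache cannot be targeted by address, so it is invalidated
 * wholesale for executable pages.
 */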
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

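/* IPI handler: invalidate the I-cache on this CPU on behalf of another. */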
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

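/*
 * Make a kernel-side modification of a user page (through kaddr)
 * visible at the user address uaddr, including to the I-cache when
 * the VMA is executable.
 */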
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
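	/*
	 * flush_ptrace_access() tests the current CPU's bit in
	 * mm_cpumask(), so the copy and the flush must not migrate
	 * between CPUs.
	 */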
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else {
		void *addr;

		if (cache_is_vipt_nonaliasing()) {
			addr = kmap_atomic(page);
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_atomic(addr);
		} else {
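			/*
			 * Flush only through an existing kernel mapping:
			 * a fresh kmap_atomic() alias would not be
			 * congruent with it on an aliasing cache.  With
			 * no mapping present there is nothing we can
			 * usefully flush here.
			 */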
			addr = kmap_high_get(page);
			if (addr) {
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_high(page);
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we need to do only one flush - at the relevant userspace colour,
	 * which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
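		/* Byte offset of the page within this VMA. */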
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
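/*
 * Called when a user PTE is being established: bring the I-cache into
 * sync with the D-cache for executable pages, using PG_dcache_clean to
 * skip pages whose kernel-side dirty lines were already written back.
 */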
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}