[ARM] 2941/1: Fix running legacy binaries from a soft-float root filesystem with...
arch/arm/mm/flush.c

/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000

#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)

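/*
 * Map the page containing @pfn into the reserved kernel window at the
 * same cache colour as the user address @vaddr, then clean+invalidate
 * the D-cache alias (mcrr ... c14, an ARMv6 block range operation) and
 * invalidate the I-cache alias (mcrr ... c5) for that one mapping.
 */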
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

	set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_kernel_page(to);

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcrr	p15, 0, %1, %0, c5\n"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
	    : "cc");
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			__cpuc_flush_user_all();
		return;
	}

	if (cache_is_vipt_aliasing()) {
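		/*
		 * Clean and invalidate the entire D-cache (c7, c14, 0),
		 * invalidate the entire I-cache (c7, c5, 0) and drain
		 * the write buffer (c7, c10, 4).
		 */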
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c5, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
						vma->vm_flags);
		return;
	}

	if (cache_is_vipt_aliasing()) {
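		/*
		 * On an aliasing VIPT cache, flushing by user virtual
		 * address from kernel context would hit the wrong alias,
		 * so fall back to the same full clean+invalidate used by
		 * flush_cache_mm() above.
		 */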
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c5, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
			unsigned long addr = user_addr & PAGE_MASK;
			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
		}
		return;
	}

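	/*
	 * For a single page we can do better than a full flush: write
	 * back and invalidate only the one alias congruent with the
	 * user address's cache colour.
	 */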
	if (cache_is_vipt_aliasing())
		flush_pfn_alias(pfn, user_addr);
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	__cpuc_flush_dcache_page(page_address(page));

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
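	/*
	 * page->index counts in PAGE_CACHE_SIZE units; convert it to a
	 * PAGE_SIZE-based offset for the prio tree lookup below.
	 */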
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
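		/*
		 * Translate the page's file offset into the user virtual
		 * address at which this VMA maps it.
		 */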
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

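	/*
	 * Unmapped page cache page: defer the flush.  update_mmu_cache()
	 * checks PG_dcache_dirty and flushes when the page is finally
	 * mapped into userspace.
	 */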
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
	}
}
EXPORT_SYMBOL(flush_dcache_page);