/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */
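
/*
 * Usage sketch (added for illustration; "buf" and "len" are hypothetical
 * names, not part of this header): a typical caller writes dirty lines
 * back and discards them before handing memory to a non-coherent master:
 *
 *	__flush_invalidate_dcache_range((unsigned long)buf, len);
 */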

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);

#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p, s)		do { } while (0)
# define __flush_dcache_page(p)			do { } while (0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p, s)	__invalidate_dcache_range(p, s)
#endif

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif

/*
 * We have physically tagged caches - nothing to do here -
 * unless we have cache aliasing.
 *
 * Pages can get remapped. Because this might change the 'color' of that page,
 * we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */
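
/*
 * Background note (added for illustration): with a virtually indexed
 * cache whose way size exceeds PAGE_SIZE, the "color" of a page is the
 * part of the virtual address that indexes the cache above the page
 * offset. Two mappings of the same physical page can alias only if
 * their colors differ:
 *
 *	color = (vaddr >> PAGE_SHIFT) & ((DCACHE_WAY_SIZE / PAGE_SIZE) - 1);
 */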

#if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP)

#ifdef CONFIG_SMP
void flush_cache_all(void);
void flush_cache_range(struct vm_area_struct*, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct*,
		      unsigned long, unsigned long);
#else
#define flush_cache_all local_flush_cache_all
#define flush_cache_range local_flush_cache_range
#define flush_icache_range local_flush_icache_range
#define flush_cache_page local_flush_cache_page
#endif

#define local_flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);

void local_flush_cache_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end);
void local_flush_cache_page(struct vm_area_struct *vma,
			    unsigned long address, unsigned long pfn);

#else

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)

#define flush_cache_vmap(start,end)		do { } while (0)
#define flush_cache_vunmap(start,end)		do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)

#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn)	do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end)				\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start, (end) - (start));	\
	} while (0)
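
/*
 * Usage sketch (added for illustration; "dst", "insn" and "count" are
 * hypothetical): after patching executable memory, e.g. installing a
 * breakpoint or trampoline, the new instructions must be pushed from
 * the dcache into the icache:
 *
 *	memcpy(dst, insn, count);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + count);
 */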

/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma, page)		do { } while (0)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
			      unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
				unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	memcpy(dst, src, len)

#endif

#define XTENSA_CACHEBLK_LOG2	29
#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)
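
/*
 * Note (added for clarity): XTENSA_CACHEBLK_LOG2 == 29 splits the 4 GiB
 * address space into eight 512 MiB blocks, each described by one 4-bit
 * nibble of the CACHEATTR register. For example, 0xd0000000 >> 29 == 6,
 * so that address is governed by attribute nibble 6.
 */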

#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
	u32 r;
	asm volatile("	rsr %0, cacheattr" : "=a"(r));
	return r;
}

static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r = addr & XTENSA_CACHEBLK_MASK;
	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2 - 2)))
			& 0xF);
}
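
/*
 * Note (added for clarity): r >> (XTENSA_CACHEBLK_LOG2 - 2) converts the
 * block base address into (block index * 4), the bit offset of that
 * block's attribute nibble within CACHEATTR; the return value thus
 * mimics an RDTLB1 result: block base plus 4-bit cache attribute.
 */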
#else
static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r;
	asm volatile("	rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
	asm volatile("	dsync");
	return r;
}

static inline u32 xtensa_get_cacheattr(void)
{
	u32 r = 0;
	u32 a = 0;
	do {
		a -= XTENSA_CACHEBLK_SIZE;
		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
	} while (a);
	return r;
}
#endif
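
/*
 * Note (added for clarity): the fallback xtensa_get_cacheattr() above
 * walks the eight 512 MiB blocks from 0xe0000000 down to 0x00000000
 * ("a" wraps to zero on the last pass), collecting each block's 4-bit
 * attribute from the TLB so that block 0 lands in the lowest nibble,
 * matching the layout a real CACHEATTR register would have.
 */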

static inline int xtensa_need_flush_dma_source(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}
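
/*
 * Usage sketch (added for illustration; "buf" and "len" are hypothetical):
 * these predicates let a driver skip cache maintenance when a mapping is
 * not actually cached, e.g. before starting DMA from a buffer:
 *
 *	if (xtensa_need_flush_dma_source((u32)buf))
 *		flush_dcache_unaligned((u32)buf, len);
 */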

static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
		       + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwb %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}

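/*
 * Note (added for clarity): unlike a plain invalidate, the function below
 * must not destroy unrelated data sharing the first and last cache lines
 * of the range, so those boundary lines are written back and invalidated
 * (dhwbi) while only the fully covered interior lines are dropped (dhi).
 */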
static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
	int cnt;
	if (size) {
		asm volatile("	dhwbi %0, 0" : : "a"(addr));
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
		       - XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt-- > 0) {
			asm volatile("	dhi %0, %1" : : "a"(addr),
					"n"(XCHAL_DCACHE_LINESIZE));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dhwbi %0, %1" : : "a"(addr),
				"n"(XCHAL_DCACHE_LINESIZE));
		asm volatile("	dsync");
	}
}

static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
		       + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwbi %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}
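
/*
 * Usage sketch (added for illustration; "buf" and "len" are hypothetical):
 * a DMA round trip with these helpers might look like:
 *
 *	flush_dcache_unaligned((u32)buf, len);       hand buffer to device
 *	...device fills buf with its reply...
 *	invalidate_dcache_unaligned((u32)buf, len);  discard stale lines
 */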

#endif /* _XTENSA_CACHEFLUSH_H */