/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 *	__invalidate_icache_all()
 *	__invalidate_icache_page(adr)
 *	__invalidate_dcache_page(adr)
 *	__invalidate_icache_range(from,size)
 *	__invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 *	__flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 *	__flush_invalidate_dcache_all()
 *	__flush_invalidate_dcache_page(adr)
 *	__flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 *	__flush_invalidate_dcache_page_alias(vaddr,paddr)
 *	__invalidate_icache_page_alias(vaddr,paddr)
 */

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);

#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p, s)		do { } while (0)
# define __flush_dcache_page(p)			do { } while (0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p, s)	__invalidate_dcache_range(p, s)
#endif

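/*
 * Illustrative sketch, not part of the original header: a typical way the
 * primitives above pair up around a device DMA transfer, assuming 'buf'
 * and 'len' describe a kernel virtual range the device reads or writes
 * directly. Real drivers should go through the DMA-mapping API; this only
 * shows which primitive matches which direction.
 */
#if 0
static inline void example_sync_for_device(void *buf, unsigned long len)
{
	/* Write dirty lines back so the device reads up-to-date memory. */
	__flush_dcache_range((unsigned long)buf, len);
}

static inline void example_sync_for_cpu(void *buf, unsigned long len)
{
	/* Discard stale lines so the CPU reloads what the device wrote. */
	__invalidate_dcache_range((unsigned long)buf, len);
}
#endif
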
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif

/*
 * We have physically tagged caches, so there is normally nothing to do
 * here unless we have cache aliasing.
 *
 * Pages can get remapped. Because this might change the 'color' of that page,
 * we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */

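/*
 * Illustrative sketch, an assumption rather than code from this header:
 * with a virtually indexed data cache whose way size exceeds PAGE_SIZE,
 * the index bits above PAGE_SHIFT form the page 'color' mentioned above.
 * Two virtual mappings of one physical page can only share cache lines
 * safely when their colors match.
 */
#if 0
static inline unsigned long example_dcache_color(unsigned long vaddr)
{
	/* Number of distinct colors: way size divided by page size. */
	return (vaddr / PAGE_SIZE) % (DCACHE_WAY_SIZE / PAGE_SIZE);
}
#endif
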
#if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP)

#ifdef CONFIG_SMP
void flush_cache_all(void);
void flush_cache_range(struct vm_area_struct*, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct*,
		      unsigned long, unsigned long);
#else
#define flush_cache_all		local_flush_cache_all
#define flush_cache_range	local_flush_cache_range
#define flush_icache_range	local_flush_icache_range
#define flush_cache_page	local_flush_cache_page
#endif

#define local_flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);

void local_flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
void local_flush_cache_page(struct vm_area_struct *vma,
		unsigned long address, unsigned long pfn);

#else

#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)

#define flush_cache_vmap(start,end)			do { } while (0)
#define flush_cache_vunmap(start,end)			do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)				do { } while (0)

#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn)		do { } while (0)
#define flush_cache_range(vma, start, end)		do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end)				\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start, (end) - (start));	\
	} while (0)

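/*
 * Illustrative sketch, assuming a caller that has just written machine
 * code into 'code'/'len' (a trampoline, say): the data cache must be
 * written back and the instruction cache invalidated before the new
 * instructions may execute. Hypothetical helper, not from this header.
 */
#if 0
static inline void example_finish_code_patch(void *code, unsigned long len)
{
	/* Push the stores to memory, then drop stale icache lines. */
	local_flush_icache_range((unsigned long)code,
				 (unsigned long)code + len);
}
#endif
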
/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)		do { } while (0)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	memcpy(dst, src, len)

#endif
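
/*
 * Illustrative sketch, an assumed caller rather than part of this header:
 * copy_to_user_page() is what access_process_vm()-style code uses when,
 * for example, a debugger plants a breakpoint in another task's text.
 * 'kaddr' is the kernel mapping of the target page, 'uaddr' the task's
 * virtual address; the flush/invalidate pair above makes the new bytes
 * visible to instruction fetch.
 */
#if 0
static inline void example_poke_text(struct vm_area_struct *vma,
				     struct page *page, unsigned long uaddr,
				     void *kaddr, const void *insn,
				     unsigned long len)
{
	/* Copy the instruction bytes, then sync D- and I-caches. */
	copy_to_user_page(vma, page, uaddr, kaddr, insn, len);
}
#endif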

/*
 * The 4 GiB address space is split into eight 512 MiB blocks; the
 * CACHEATTR register (or the corresponding DTLB way-1 entries) holds
 * one 4-bit cache attribute per block.
 */
#define XTENSA_CACHEBLK_LOG2	29
#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)

#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
	u32 r;
	asm volatile("	rsr %0, cacheattr" : "=a"(r));
	return r;
}

static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r = addr & XTENSA_CACHEBLK_MASK;
	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
			& 0xF);
}
#else
static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r;
	asm volatile("	rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
	asm volatile("	dsync");
	return r;
}

static inline u32 xtensa_get_cacheattr(void)
{
	u32 r = 0;
	u32 a = 0;
	do {
		a -= XTENSA_CACHEBLK_SIZE;
		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
	} while (a);
	return r;
}
#endif
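
/*
 * Illustrative sketch, not from the original header: reading the 4-bit
 * attribute for one address straight out of CACHEATTR. For example,
 * addr = 0xd0000000 lies in block 6 (0xd0000000 >> 29 == 6), so its
 * attribute is nibble 6 of the register, matching what xtensa_get_dtlb1()
 * folds into its result.
 */
#if 0
static inline u32 example_block_attr(u32 addr)
{
	/* One nibble per 512 MiB block, block index = addr >> 29. */
	return (xtensa_get_cacheattr() >>
		((addr >> XTENSA_CACHEBLK_LOG2) * 4)) & 0xF;
}
#endif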

/*
 * DMA helpers: attribute values >= 4 mark mappings that may hold data in
 * the cache and so must be written back before a device reads the buffer;
 * value 2 is cache bypass, the only mode that needs no invalidate before
 * the CPU consumes data a device has written.
 */
static inline int xtensa_need_flush_dma_source(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}

/* Write back every line touching [addr, addr + size), for any alignment. */
static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		/* Round up to the number of lines the range overlaps. */
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwb %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}

/*
 * Invalidate [addr, addr + size) without corrupting neighbouring data:
 * the first and last lines may be only partially covered, so they are
 * written back and invalidated (dhwbi) while the interior lines are
 * simply invalidated (dhi).
 */
static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
	int cnt;
	if (size) {
		asm volatile("	dhwbi %0, 0 ;" : : "a"(addr));
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt-- > 0) {
			asm volatile("	dhi %0, %1" : : "a"(addr),
					"n"(XCHAL_DCACHE_LINESIZE));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dhwbi %0, %1" : : "a"(addr),
				"n"(XCHAL_DCACHE_LINESIZE));
		asm volatile("	dsync");
	}
}

/* Write back and invalidate every line touching [addr, addr + size). */
static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwbi %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}
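
/*
 * Illustrative sketch, assuming a driver-owned buffer that a device is
 * about to read: the predicate above decides whether the mapping is
 * cached at all, and the unaligned flush handles buffers that do not
 * start or end on a cache-line boundary. Hypothetical helper, not part
 * of the original header.
 */
#if 0
static inline void example_prepare_dma_source(u32 addr, u32 size)
{
	if (xtensa_need_flush_dma_source(addr))
		flush_dcache_unaligned(addr, size);
}
#endif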

#endif /* _XTENSA_CACHEFLUSH_H */