/*
 * linux/include/asm-arm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
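
/*
 * Illustrative only: with ARM's usual SHMLBA of 4 * PAGE_SIZE, this
 * yields a colour in the range 0-3; e.g. with 4K pages,
 * CACHE_COLOUR(0x40003000) == 3.
 */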

/*
 * Cache Model
 * ===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE feroceon
# endif
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif

#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v7
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 * MM Cache Management
 * ===================
 *
 * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 * implement these methods.
 *
 * Start addresses are inclusive and end addresses are exclusive;
 * start addresses should be rounded down, end addresses up.
 *
 * See Documentation/cachetlb.txt for more information.
 * Please note that the implementation of these, and the required
 * effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */
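
/*
 * Illustrative sketch only (not a definition in this header): the
 * dma-mapping API uses the primitives above roughly as follows for
 * a streaming buffer 'buf' of 'size' bytes:
 *
 *	dmac_clean_range(buf, buf + size);	// CPU wrote, device reads
 *	...device performs DMA...
 *	dmac_inv_range(buf, buf + size);	// device wrote, CPU reads
 *
 * Drivers must go through dma_map_single()/dma_unmap_single() rather
 * than calling the dmac_* primitives directly.
 */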

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};
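
/*
 * Illustrative sketch only, assuming the arm926 entry points from
 * arch/arm/mm/cache-arm926.S: on a MULTI_CACHE build, each CPU type
 * supplies a table of this shape (in the real tree it is assembled
 * directly in the cache-*.S file):
 *
 *	struct cpu_cache_fns arm926_cache_fns = {
 *		.flush_kern_all		= arm926_flush_kern_cache_all,
 *		.flush_user_all		= arm926_flush_user_cache_all,
 *		.flush_user_range	= arm926_flush_user_cache_range,
 *		.coherent_kern_range	= arm926_coherent_kern_range,
 *		.coherent_user_range	= arm926_coherent_user_range,
 *		.flush_kern_dcache_page	= arm926_flush_kern_dcache_page,
 *		.dma_inv_range		= arm926_dma_inv_range,
 *		.dma_clean_range	= arm926_dma_clean_range,
 *		.dma_flush_range	= arm926_dma_flush_range,
 *	};
 */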

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif
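
/*
 * Illustrative only: __glue() (see asm/glue.h) token-pastes the cache
 * model into the symbol name, so a single-cache v4wb build expands
 *
 *	__cpuc_flush_kern_all()
 * to
 *	v4wb_flush_kern_cache_all()
 *
 * at compile time, with no indirection through cpu_cache.
 */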

#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
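
/*
 * Illustrative sketch only: on a system with an outer cache (e.g. an
 * L210/L220), making a buffer visible to a DMA device must clean both
 * levels, inner first; note the outer cache operates on physical
 * addresses (assumption: 'paddr' is the buffer's physical address):
 *
 *	dmac_clean_range(vaddr, vaddr + size);
 *	outer_clean_range(paddr, paddr + size);
 */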

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  Since the
 * direct-mappings of these pages may contain cached data, we need
 * to do a full cache flush to ensure that writebacks don't corrupt
 * data placed into these pages via the new mappings.
 */
#define flush_cache_vmap(start, end)	flush_cache_all()
#define flush_cache_vunmap(start, end)	flush_cache_all()

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
	} while (0)

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()
#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			unsigned long uaddr, void *kaddr,
			unsigned long len, int write)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

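/*
 * Illustrative only (hypothetical user-space code, not part of this
 * header): a JIT that has just written instructions would reach
 * flush_cache_user_range() via the private syscall:
 *
 *	syscall(__ARM_NR_cacheflush, code, code + code_len, 0);
 */
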
/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

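/*
 * Illustrative only: this is the primitive needed after the kernel
 * itself writes instructions, e.g. when loading module code:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */
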
/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);

extern void __flush_dcache_page(struct address_space *mapping, struct page *page);

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
	unsigned offset, size_t size)
{
	const void *start = (void __force *)virt + offset;
	dmac_inv_range(start, start + size);
}

#define __cacheid_present(val)			(val != read_cpuid(CPUID_ID))
#define __cacheid_type_v7(val)			((val & (7 << 29)) == (4 << 29))

#define __cacheid_vivt_prev7(val)		((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt_prev7(val)		((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing_prev7(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing_prev7(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))

#define __cacheid_vivt(val)			(__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val))
#define __cacheid_vipt(val)			(__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val))
#define __cacheid_vipt_nonaliasing(val)		(__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val))
#define __cacheid_vipt_aliasing(val)		(__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val))
#define __cacheid_vivt_asid_tagged_instr(val)	(__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0)
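
/*
 * Illustrative decode of the bit tests above (assumption drawn from
 * the masks; see the ARM ARM for the authoritative register layout):
 *
 *	pre-v7 cache type register:
 *	  bits [28:25]	cache type field; 0b1110 => VIPT data cache,
 *			anything else is treated as VIVT
 *	  bit  [23]	set on VIPT caches that can alias
 *
 *	v7: bits [31:29] == 0b100 selects the new register format, and
 *	bits [15:14] give the L1 instruction cache policy (0b01 being
 *	ASID-tagged VIVT).
 */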

#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			1
#define cache_is_vipt()			0
#define cache_is_vipt_nonaliasing()	0
#define cache_is_vipt_aliasing()	0
#define icache_is_vivt_asid_tagged()	0

#elif defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			0
#define cache_is_vipt()			1
#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_aliasing(__val);			\
	})

#define icache_is_vivt_asid_tagged()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vivt_asid_tagged_instr(__val);	\
	})

#else

#define cache_is_vivt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
	})

#define cache_is_vipt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) && __cacheid_vipt(__val); \
	})

#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
			__cacheid_vipt_nonaliasing(__val);	\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
			__cacheid_vipt_aliasing(__val);		\
	})

#define icache_is_vivt_asid_tagged()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
			__cacheid_vivt_asid_tagged_instr(__val); \
	})

#endif

#endif