Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/mm/cache-v4wt.S | |
3 | * | |
4 | * Copyright (C) 1997-2002 Russell King | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | * ARMv4 write through cache operations support. | |
11 | * | |
12 | * We assume that the write buffer is not enabled. | |
13 | */ | |
14 | #include <linux/linkage.h> | |
15 | #include <linux/init.h> | |
6ebbf2ce | 16 | #include <asm/assembler.h> |
1da177e4 LT |
17 | #include <asm/page.h> |
18 | #include "proc-macros.S" | |
19 | ||
20 | /* | |
21 | * The size of one data cache line. | |
22 | */ | |
23 | #define CACHE_DLINESIZE 32 | |
24 | ||
25 | /* | |
26 | * The number of data cache segments. | |
27 | */ | |
28 | #define CACHE_DSEGMENTS 8 | |
29 | ||
30 | /* | |
31 | * The number of lines in a cache segment. | |
32 | */ | |
33 | #define CACHE_DENTRIES 64 | |
34 | ||
35 | /* | |
36 | * This is the size at which it becomes more efficient to | |
37 | * clean the whole cache, rather than using the individual | |
25985edc | 38 | * cache line maintenance instructions. |
1da177e4 LT |
39 | * |
40 | * *** This needs benchmarking | |
41 | */ | |
42 | #define CACHE_DLIMIT 16384 | |
43 | ||
c8c90860 MW |
44 | /* |
45 | * flush_icache_all() | |
46 | * | |
47 | * Unconditionally clean and invalidate the entire icache. | |
48 | */ | |
49 | ENTRY(v4wt_flush_icache_all) | |
50 | mov r0, #0 | |
51 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | |
6ebbf2ce | 52 | ret lr |
c8c90860 MW |
53 | ENDPROC(v4wt_flush_icache_all) |
54 | ||
1da177e4 LT |
55 | /* |
56 | * flush_user_cache_all() | |
57 | * | |
58 | * Invalidate all cache entries in a particular address | |
59 | * space. | |
60 | */ | |
61 | ENTRY(v4wt_flush_user_cache_all) | |
62 | /* FALLTHROUGH */ | |
63 | /* | |
64 | * flush_kern_cache_all() | |
65 | * | |
66 | * Clean and invalidate the entire cache. | |
67 | */ | |
68 | ENTRY(v4wt_flush_kern_cache_all) | |
69 | mov r2, #VM_EXEC | |
70 | mov ip, #0 | |
71 | __flush_whole_cache: | |
72 | tst r2, #VM_EXEC | |
73 | mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache | |
74 | mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache | |
6ebbf2ce | 75 | ret lr |
1da177e4 LT |
76 | |
77 | /* | |
78 | * flush_user_cache_range(start, end, flags) | |
79 | * | |
80 | * Clean and invalidate a range of cache entries in the specified | |
81 | * address space. | |
82 | * | |
83 | * - start - start address (inclusive, page aligned) | |
84 | * - end - end address (exclusive, page aligned) | |
85 | * - flags - vm_area_struct flags describing address space | |
86 | */ | |
87 | ENTRY(v4wt_flush_user_cache_range) | |
88 | sub r3, r1, r0 @ calculate total size | |
89 | cmp r3, #CACHE_DLIMIT | |
90 | bhs __flush_whole_cache | |
91 | ||
92 | 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry | |
93 | tst r2, #VM_EXEC | |
94 | mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry | |
95 | add r0, r0, #CACHE_DLINESIZE | |
96 | cmp r0, r1 | |
97 | blo 1b | |
6ebbf2ce | 98 | ret lr |
1da177e4 LT |
99 | |
100 | /* | |
101 | * coherent_kern_range(start, end) | |
102 | * | |
103 | * Ensure coherency between the Icache and the Dcache in the | |
104 | * region described by start. If you have non-snooping | |
105 | * Harvard caches, you need to implement this function. | |
106 | * | |
107 | * - start - virtual start address | |
108 | * - end - virtual end address | |
109 | */ | |
110 | ENTRY(v4wt_coherent_kern_range) | |
111 | /* FALLTHROUGH */ | |
112 | ||
113 | /* | |
114 | * coherent_user_range(start, end) | |
115 | * | |
116 | * Ensure coherency between the Icache and the Dcache in the | |
117 | * region described by start. If you have non-snooping | |
118 | * Harvard caches, you need to implement this function. | |
119 | * | |
120 | * - start - virtual start address | |
121 | * - end - virtual end address | |
122 | */ | |
123 | ENTRY(v4wt_coherent_user_range) | |
124 | bic r0, r0, #CACHE_DLINESIZE - 1 | |
125 | 1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry | |
126 | add r0, r0, #CACHE_DLINESIZE | |
127 | cmp r0, r1 | |
128 | blo 1b | |
c5102f59 | 129 | mov r0, #0 |
6ebbf2ce | 130 | ret lr |
1da177e4 LT |
131 | |
132 | /* | |
2c9b9c84 | 133 | * flush_kern_dcache_area(void *addr, size_t size) |
1da177e4 LT |
134 | * |
135 | * Ensure no D cache aliasing occurs, either with itself or | |
136 | * the I cache | |
137 | * | |
2c9b9c84 RK |
138 | * - addr - kernel address |
139 | * - size - region size | |
1da177e4 | 140 | */ |
2c9b9c84 | 141 | ENTRY(v4wt_flush_kern_dcache_area) |
1da177e4 LT |
142 | mov r2, #0 |
143 | mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache | |
2c9b9c84 | 144 | add r1, r0, r1 |
1da177e4 LT |
145 | /* fallthrough */ |
146 | ||
147 | /* | |
148 | * dma_inv_range(start, end) | |
149 | * | |
150 | * Invalidate (discard) the specified virtual address range. | |
151 | * May not write back any entries. If 'start' or 'end' | |
152 | * are not cache line aligned, those lines must be written | |
153 | * back. | |
154 | * | |
155 | * - start - virtual start address | |
156 | * - end - virtual end address | |
157 | */ | |
702b94bf | 158 | v4wt_dma_inv_range: |
1da177e4 LT |
159 | bic r0, r0, #CACHE_DLINESIZE - 1 |
160 | 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry | |
161 | add r0, r0, #CACHE_DLINESIZE | |
162 | cmp r0, r1 | |
163 | blo 1b | |
6ebbf2ce | 164 | ret lr |
1da177e4 LT |
165 | |
166 | /* | |
167 | * dma_flush_range(start, end) | |
168 | * | |
169 | * Clean and invalidate the specified virtual address range. | |
170 | * | |
171 | * - start - virtual start address | |
172 | * - end - virtual end address | |
173 | */ | |
174 | .globl v4wt_dma_flush_range | |
175 | .equ v4wt_dma_flush_range, v4wt_dma_inv_range | |
176 | ||
a9c9147e RK |
177 | /* |
178 | * dma_unmap_area(start, size, dir) | |
179 | * - start - kernel virtual start address | |
180 | * - size - size of region | |
181 | * - dir - DMA direction | |
182 | */ | |
183 | ENTRY(v4wt_dma_unmap_area) | |
184 | add r1, r1, r0 | |
185 | teq r2, #DMA_TO_DEVICE | |
186 | bne v4wt_dma_inv_range | |
187 | /* FALLTHROUGH */ | |
188 | ||
189 | /* | |
190 | * dma_map_area(start, size, dir) | |
191 | * - start - kernel virtual start address | |
192 | * - size - size of region | |
193 | * - dir - DMA direction | |
194 | */ | |
195 | ENTRY(v4wt_dma_map_area) | |
6ebbf2ce | 196 | ret lr |
a9c9147e RK |
197 | ENDPROC(v4wt_dma_unmap_area) |
198 | ENDPROC(v4wt_dma_map_area) | |
199 | ||
031bd879 LP |
200 | .globl v4wt_flush_kern_cache_louis |
201 | .equ v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all | |
202 | ||
1da177e4 LT |
203 | __INITDATA |
204 | ||
d5b5b2e2 DM |
205 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
206 | define_cache_functions v4wt |