/*
 * ARC700 VIPT Cache Management
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
 *  -flush_cache_dup_mm (fork)
 *  -likewise for flush_cache_mm (exit/execve)
 *  -likewise for flush_cache_range, flush_cache_page (munmap, exit, COW-break)
 *
 * vineetg: Apr 2011
 *  -Now that MMU can support larger page sizes (16K), the determination of
 *   aliasing should not be based on the assumption of an 8K page
 *
 * vineetg: Mar 2011
 *  -optimised version of flush_icache_range() for making I/D coherent
 *   when vaddr is available (agnostic of number of aliases)
 *
 * vineetg: Mar 2011
 *  -Added documentation about I-cache aliasing on ARC700 and the way it
 *   was handled up until MMU V2.
 *  -Spotted a three year old bug when killing the 4 aliases, which needs
 *   the bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
 *   instead of paddr | {0x00, 0x01, 0x10, 0x11}
 *   (Rajesh you owe me one now)
 *
 * vineetg: Dec 2010
 *  -Off-by-one error when computing num_of_lines to flush
 *   This broke signal handling with bionic which uses a synthetic sigret stub
 *
 * vineetg: Mar 2010
 *  -GCC can't generate ZOL for core cache flush loops.
 *   Converted them into iteration based loops as opposed to
 *   while (start < end) types
 *
 * Vineetg: July 2009
 *  -In I-cache flush routine we used to check for aliasing for every line INV.
 *   Instead now we set up routines per cache geometry and invoke them
 *   via function pointers.
 *
 * Vineetg: Jan 2009
 *  -Cache Line flush routines used to flush an extra line beyond end addr
 *   because the check was while (end >= start) instead of (end > start)
 *   =Some call sites had to work around by doing -1, -4 etc to end param
 *   =Some callers didn't care. This was especially bad in case of INV routines
 *    which would discard valid data (cause of the horrible ext2 bug
 *    in ARC IDE driver)
 *
 * vineetg: June 11th 2008: Fixed flush_icache_range()
 *  -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
 *   to be flushed, which it was not doing.
 *  -load_module() passes vmalloc addr (Kernel Virtual Addr) to the API,
 *   however ARC cache maintenance OPs require PHY addr. Thus need to do
 *   vmalloc_to_phy.
 *  -Also added an optimisation there: for range > PAGE SIZE we flush the
 *   entire cache in one shot rather than line by line. E.g. for a module
 *   with code sz 600k, old code flushed 600k worth of cache (line-by-line),
 *   while the cache is only 16 or 32k.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_ENABLED(cfg) ? "" : " (not used)");

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	return buf;
}
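
/*
 * Illustrative output of the above (hypothetical geometry, not a claim about
 * any particular board): a 32K/2-way I-cache and 32K/4-way D-cache with 64B
 * lines and 8K pages would print roughly:
 *
 *	I-Cache		: 32K, 2way/set, 64B Line, VIPT aliasing
 *	D-Cache		: 32K, 4way/set, 64B Line, VIPT
 */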

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation is done here, simply read/convert the BCRs
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	BUG_ON(ibcr.config != 3);
	p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		return;

	BUG_ON(dbcr.config != 2);
	p_dc->assoc = 4;		/* Fixed to 4w set assoc */
	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;
	p_dc->vipt = 1;
	p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
}
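
/*
 * Worked example of the aliasing check above (hypothetical geometry):
 * with 8K pages, a 32K/2-way I-cache has a 16K way, so
 * sz_k/assoc/TO_KB(PAGE_SIZE) = 32/2/8 = 2 > 1 and the cache aliases;
 * a 32K/4-way D-cache gives 32/4/8 = 1 (way-size == page-size), so it
 * does not.
 */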

/*
 * 1. Validate the Cache Geometry (compile time config matches hardware)
 * 2. If I-cache suffers from aliasing, setup work arounds (difft flush rtn)
 *    (aliasing D-cache configurations are not supported YET)
 * 3. Enable the Caches, setup default flush mode for D-Cache
 * 4. Calculate the SHMLBA used by user space
 */
void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
		int handled;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing */
		handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

		if (dc->alias && !handled)
			panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		else if (!dc->alias && handled)
			panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
	}
}

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * Common Helper for Line Operations on {I,D}-Cache
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
				     unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
#if (CONFIG_ARC_MMU_VER > 2)
		aux_tag = ARC_REG_IC_PTAG;
#endif
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
#if (CONFIG_ARC_MMU_VER > 2)
		aux_tag = ARC_REG_DC_PTAG;
#endif
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr aligned to cache line and an integral @num_lines.
	 * This however can be avoided for page sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#else
	/* if V-P const for loop, PTAG can be written once outside loop */
	if (full_page_op)
		write_aux_reg(aux_tag, paddr);
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* MMUv3, cache ops require paddr separately */
		if (!full_page_op) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
#else
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
#endif
	}
}
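
/*
 * Worked example of the floor/ceil math in __cache_line_loop() above
 * (hypothetical numbers): with 64B lines, paddr = 0x8000_1034 and sz = 40
 * first grow sz by the intra-line offset (0x34 = 52) to 92, then floor
 * paddr/vaddr to 0x8000_1000, giving num_lines = DIV_ROUND_UP(92, 64) = 2,
 * exactly the two lines the original 40-byte span touches.
 */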

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static unsigned int __before_dc_op(const int op)
{
	unsigned int reg = reg;

	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		reg = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
	}

	return reg;
}

static void __after_dc_op(const int op, unsigned int reg)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS);

	/* Switch back to default Invalidate mode */
	if (op == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
}
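
/*
 * Note on the pairing above (a summary of the existing logic, not new
 * behaviour): for OP_FLUSH_N_INV, __before_dc_op() sets the IM bit in
 * DC_CTRL so that the subsequent invalidate command also writes dirty
 * lines back; __after_dc_op() then polls the DC_CTRL flush-status bit
 * until the writeback completes and restores the default
 * discard-on-invalidate mode.
 */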

/*
 * Operation on Entire D-Cache
 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int cacheop)
{
	unsigned int ctrl_reg;
	int aux;

	ctrl_reg = __before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop, ctrl_reg);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int cacheop)
{
	unsigned long flags;
	unsigned int ctrl_reg;

	local_irq_save(flags);

	ctrl_reg = __before_dc_op(cacheop);

	__cache_line_loop(paddr, vaddr, sz, cacheop);

	__after_dc_op(cacheop, ctrl_reg);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, vaddr, sz, cacheop)
#define __dc_line_op_k(paddr, sz, cacheop)

#endif /* CONFIG_ARC_HAS_DCACHE */


#ifdef CONFIG_ARC_HAS_ICACHE

/*
 * I-Cache Aliasing in ARC700 VIPT caches
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of using this "clumsy"
 * interface for additional info was that no new reg was needed in the CDU
 * programming model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 *  -for 2 alias possibility, only bit 13 needed (32K cache)
 *  -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing
 * meaning more vaddr bits needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
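
/*
 * Worked example of the MMU v1/v2 scheme above (hypothetical geometry):
 * a 64K/2-way cache with 8K pages has a 32K way, i.e. 32K/8K = 4 possible
 * aliases of a physical line, needing vaddr bits [14:13]. Stuffed into
 * paddr bits [1:0], killing all 4 aliases means issuing the line op with
 * paddr | {0x00, 0x01, 0x02, 0x03} - the very fix noted in the Mar 2011
 * changelog entry at the top of this file.
 */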

/***********************************************************
 * Machine specific helper for per line I-Cache invalidate.
 */

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	__cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = (struct ic_inv_args *) info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */


/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs
 * flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		void *paddr = page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);


void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is the API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz, off, sz;
	unsigned long phy, pfn;

	/* printk("Kernel Cache Coherency: %lx to %lx\n", kstart, kend); */

	/* This is not the right API for user virtual address */
	if (kstart < TASK_SIZE) {
		BUG_ON("Flush icache range for user virtual addr space");
		return;
	}

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence the need for a loop
	 */
	while (tot_sz > 0) {
		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
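
/*
 * Illustrative walk-through of the loop above (hypothetical addresses):
 * with 8K pages, a vmalloc range kstart = 0x7000_1ff0, kend = 0x7000_2030
 * (tot_sz = 64) straddles two pages and is split into two iterations:
 * 16 bytes at offset 0x1ff0 of the first page, then 48 bytes at offset 0
 * of the next, each translated to its own phy addr via vmalloc_to_pfn().
 */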

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since
 *    a builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	___flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = page_address(from);
	void *kto = page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}


/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}