ARM: l2c: move l2c save function to __l2c_init()

arch/arm/mm/cache-l2x0.c
/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

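/*
 * Per-variant initialisation descriptor: an optional DT parsing hook, an
 * optional hook for saving register state for resume, and the outer_cache
 * function table to install for this controller type.
 */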
struct l2c_init_data {
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
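/*
 * Note: sync_reg_offset is retargeted during init: to L2X0_DUMMY_REG when
 * PL310 errata 753970 is in effect, and to AURORA_SYNC_REG for the Marvell
 * Aurora controller (see __l2c_init() below).
 */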

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

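/*
 * Clear the data and instruction lockdown registers so that no ways are
 * locked down when the cache is brought up; 'num' is the number of D/I
 * register pairs implemented by the controller.
 */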
static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif
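/*
 * The errata work-arounds below bracket maintenance operations with
 * debug_writel(0x03) ... debug_writel(0x00). Assuming the PL310 debug
 * register layout, 0x03 sets the DCL and DWB bits, temporarily disabling
 * cache linefills and write-backs for the duration of the operation.
 */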

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating by way while the L2 is enabled is not permitted */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	__l2c_op_way(l2x0_base + L2X0_INV_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

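/*
 * The range operations below work on at most 4096 bytes at a time,
 * dropping and re-taking l2x0_lock between blocks so that interrupt
 * latency stays bounded on large ranges. Note that l2x0_inv_range()
 * must clean+invalidate (flush) any partial cache line at either end
 * of the range, since invalidating it outright could discard unrelated
 * dirty data sharing the line.
 */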
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	case AURORA_CACHE_ID:
		lockregs = 4;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	l2c_unlock(l2x0_base, lockregs);
}

static const struct l2c_init_data l2x0_init_fns __initconst = {
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
			      u32 aux_val, u32 aux_mask, u32 cache_id)
{
	u32 aux;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		sync_reg_offset = AURORA_SYNC_REG;
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
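		/*
		 * The 4-bit associativity field maps to 4, 8, 16 or 32
		 * ways: e.g. field values 3, 7, 11 and 15 yield
		 * 2 << 1, 2 << 2, 2 << 3 and 2 << 4 respectively.
		 */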
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size = Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;
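	/*
	 * Worked example (assuming a 16-way PL310 with a way size field
	 * of 2 and L2X0_WAY_SIZE_SHIFT == 3): way_size = 1 << (2 + 3) = 32,
	 * so l2x0_size = 16 * 32 * SZ_1K = 512 KiB.
	 */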

	/*
	 * Check if l2x0 controller is already enabled.
	 * If you are booting from non-secure mode
	 * accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = data->outer_cache;

	if ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310 &&
	    (cache_id & L2X0_CACHE_ID_RTL_MASK) <= L310_CACHE_ID_RTL_R3P0)
		outer_cache.set_debug = pl310_set_debug;

	pr_info("%s cache controller enabled\n", type);
	pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
		ways, cache_id, aux, l2x0_size >> 10);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	__l2c_init(&l2x0_init_fns, aux_val, aux_mask, cache_id);
}
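
/*
 * A minimal, hypothetical non-DT caller (the base address is illustrative
 * only); aux_val = 0 with aux_mask = ~0U leaves the hardware's auxiliary
 * control settings untouched:
 *
 *	void __iomem *l2 = ioremap(0xfff12000, SZ_4K);
 *	if (l2)
 *		l2x0_init(l2, 0, ~0U);
 */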

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
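/*
 * For example, assuming 4 KiB pages (and MAX_RANGE_SIZE of at least the
 * remainder of the page): calc_range_end(0x10000f80, 0x10002000) returns
 * 0x10001000, the next page boundary, and the caller loops to cover the
 * rest of the range.
 */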

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Align 'start' down and 'end' up to cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush.
 *
 * Section    Address Range                 Offset        EMI
 *   1        0x00000000 - 0x3FFFFFFF       0x80000000    VC
 *   2        0x40000000 - 0xBFFFFFFF       0x40000000    SYS
 *   3        0xC0000000 - 0xFFFFFFFF       0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate addresses starting at 0xBFFF0000 and ending
 * at 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer
 * some performance hit, but keep in mind the cross-section case is very
 * rare.
 *
 * Note 2:
 * We do not need to handle the case where the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid
 * use case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because
 * of that the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
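/*
 * Worked example: an address in the SYS EMI section, say 0x50000000, is
 * remapped to 0x50000000 + BCM_SYS_EMI_OFFSET = 0x90000000 before being
 * handed to the l2x0 range operations below.
 */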

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

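/*
 * A sketch of a hypothetical device tree node that the L2x0 parser above
 * would consume; latency values are in cycles and are programmed into the
 * auxiliary control register as (value - 1):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,l220-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2>;
 *		arm,data-latency = <2 2>;
 *		arm,dirty-latency = <3>;
 *		cache-unified;
 *		cache-level = <2>;
 *	};
 */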
static void __init pl310_of_parse(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

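/*
 * For the PL310, the latency properties above are triples of
 * <read write setup> cycles written directly to the tag/data latency
 * control registers, and "arm,filter-ranges" is a <start length> pair;
 * the filter window is programmed at 1 MiB granularity.
 */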
static void __init pl310_save(void __iomem *base)
{
	u32 l2x0_revision = readl_relaxed(base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
				L2X0_POWER_CTRL);
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}
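
/*
 * The resume hooks only reprogram and re-enable the controller when it
 * comes back from suspend disabled; if firmware has already re-enabled
 * it, the saved state is left alone.
 */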

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
				l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}

static void tauros3_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       l2x0_base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       l2x0_base + L2X0_PREFETCH_CTRL);
	}

	l2x0_resume();
}

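/*
 * Enable broadcasting of L1 cache maintenance operations to the L2 via
 * what is, presumably, a Marvell-specific CP15 control register; the
 * trailing isb() ensures the coprocessor write has taken effect before
 * any subsequent cache maintenance is issued.
 */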
static void __init aurora_broadcast_l2_commands(void)
{
	__u32 u;

	/* Enable broadcasting of cache commands to L2 */
	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
	isb();
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_pl310_data __initconst = {
	.of_parse = pl310_of_parse,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.of_parse = aurora_of_parse,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.of_parse = aurora_of_parse,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_tauros3_data __initconst = {
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.of_parse = pl310_of_parse,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2x0_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_pl310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

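/*
 * A DT platform would typically call l2x0_of_init() from its machine init
 * code; e.g. (illustrative values only, preserving the hardware's aux
 * control settings):
 *
 *	if (IS_ENABLED(CONFIG_CACHE_L2X0))
 *		l2x0_of_init(0, ~0U);
 */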
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

		/*
		 * For the Aurora cache in no-outer mode, enable the
		 * correct mode using the coprocessor.
		 */
		if (data == &of_aurora_no_outer_data)
			aurora_broadcast_l2_commands();
	}

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif