ARM: l2c: implement fixups for L2 cache controller quirks/errata
arch/arm/mm/cache-l2x0.c

/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Only write the aux register if it needs changing */
	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
		writel_relaxed(aux, base + L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	writel_relaxed(0, base + L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a nono */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	__l2c_op_way(l2x0_base + L2X0_INV_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	l2c_unlock(l2x0_base, lockregs);
}

static void l2x0_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	/* l2x0 controller is disabled */
	writel_relaxed(aux, base + L2X0_AUX_CTRL);

	/* Make sure that I&D is not locked down when starting */
	l2x0_unlock(readl_relaxed(base + L2X0_CACHE_ID));

	l2x0_inv_all();

	/* enable L2X0 */
	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static const struct l2c_init_data l2x0_init_fns __initconst = {
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
	},
};

/*
 * L2C-310 specific code.
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[4];
	unsigned n = 0;

	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = pl310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}
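
/*
 * For example, booting an r3p0 part with both CONFIG_PL310_ERRATA_753970
 * and CONFIG_PL310_ERRATA_769419 enabled would log:
 *
 *	L2C-310 errata 753970 769419 enabled
 */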

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	u32 aux;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;

	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size = Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

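	/*
	 * Worked example (a sketch, assuming the usual L2X0_WAY_SIZE_SHIFT
	 * of 3): a way-size field of 3 gives 1 << (3 + 3) = 64KB per way,
	 * so a 16-way L310 with that setting has l2x0_size = 16 * 64KB = 1MB.
	 */
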
	fns = data->outer_cache;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = fns;

	pr_info("%s cache controller enabled\n", type);
	pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
		ways, cache_id, aux, l2x0_size >> 10);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
		data = &l2x0_init_fns;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

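/*
 * A sketch of the sort of device tree node this parses; the property
 * names are the ones read above, while the address and latency values
 * here are made up for illustration:
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,l220-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <1>;
 *		arm,data-latency = <1 1>;
 *		arm,dirty-latency = <1>;
 *		cache-unified;
 *		cache-level = <2>;
 *	};
 */
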
static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}

static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

static void __init pl310_of_parse(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

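/*
 * A sketch of a matching PL310 node; as the code above shows, the three
 * latency cells are read/write/setup and "arm,filter-ranges" is
 * <start length>, both with illustrative values:
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 2>;
 *		arm,filter-ranges = <0x80000000 0x10000000>;
 *		cache-unified;
 *		cache-level = <2>;
 *	};
 */
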
static void __init pl310_save(void __iomem *base)
{
	u32 l2x0_revision = readl_relaxed(base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
				L2X0_POWER_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static const struct l2c_init_data of_pl310_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}

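/*
 * For example (a sketch, with 4K pages and assuming MAX_RANGE_SIZE is
 * at least a page): for the range 0x1f80..0x2100, the first call
 * returns 0x2000, the page boundary, and the caller's loop issues a
 * second operation covering 0x2000..0x2100.
 */
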
/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round the start address down and the end address up to
	 * cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
				l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starting at 0xBFFF0000
 * and ending at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF
 * and 2) 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR        0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL

#define BCM_SYS_EMI_OFFSET            0x40000000UL
#define BCM_VC_EMI_OFFSET             0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

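/*
 * Worked example from the table above: 0xBFFF0000 lies in the SYS EMI
 * section, so bcm_l2_phys_addr() returns 0xBFFF0000 + 0x40000000 =
 * 0xFFFF0000, while anything at or above 0xC0000000 gets the VC EMI
 * offset of 0x80000000 instead.
 */
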
static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       l2x0_base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       l2x0_base + L2X0_PREFETCH_CTRL);
	}

	l2x0_resume();
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2x0_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_pl310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif