ARM: mvebu: remove unused register offset definition
arch/arm/mach-mvebu/coherency.c

/*
 * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The Armada 370 and Armada XP SoCs have a coherency fabric which is
 * responsible for ensuring hardware coherency between all CPUs and
 * between CPUs and I/O masters. This file initializes the coherency
 * fabric and supplies basic routines for configuring and controlling
 * hardware coherency.
 */

#define pr_fmt(fmt) "mvebu-coherency: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mbus.h>
#include <linux/clk.h>
#include <linux/pci.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
#include "armada-370-xp.h"
#include "coherency.h"
#include "mvebu-soc-id.h"

unsigned long coherency_phys_base;
void __iomem *coherency_base;
static void __iomem *coherency_cpu_base;

/* Coherency fabric registers */
#define IO_SYNC_BARRIER_CTL_OFFSET	0x0

enum {
	COHERENCY_FABRIC_TYPE_NONE,
	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
	COHERENCY_FABRIC_TYPE_ARMADA_375,
	COHERENCY_FABRIC_TYPE_ARMADA_380,
};

static const struct of_device_id of_coherency_table[] = {
	{ .compatible = "marvell,coherency-fabric",
	  .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
	{ .compatible = "marvell,armada-375-coherency-fabric",
	  .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_375 },
	{ .compatible = "marvell,armada-380-coherency-fabric",
	  .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_380 },
	{ /* end of list */ },
};

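/*
 * For reference, a coherency fabric node matched by this table
 * typically looks like the following (an illustrative sketch based on
 * the Armada XP device tree; the exact unit addresses and region
 * sizes are SoC-specific):
 *
 *	coherency-fabric@20200 {
 *		compatible = "marvell,coherency-fabric";
 *		reg = <0x20200 0xb0>, <0x21010 0x1c>;
 *	};
 *
 * The first reg entry is the fabric itself (mapped below as
 * coherency_base), the second is the per-CPU coherency unit
 * (coherency_cpu_base).
 */
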
/* Functions defined in coherency_ll.S */
int ll_enable_coherency(void);
void ll_add_cpu_to_smp_group(void);

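/*
 * set_cpu_coherent() - make the calling CPU cache-coherent.
 *
 * Adds the current CPU to its SMP group and enables coherency for it
 * through the low-level helpers above. Returns 1 if the coherency
 * fabric has not been initialized, otherwise the result of
 * ll_enable_coherency().
 */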
int set_cpu_coherent(void)
{
	if (!coherency_base) {
		pr_warn("Can't make current CPU cache coherent.\n");
		pr_warn("Coherency fabric is not initialized\n");
		return 1;
	}

	ll_add_cpu_to_smp_group();
	return ll_enable_coherency();
}

/*
 * The code below implements the I/O coherency workaround on Armada
 * 375. This workaround consists in using the two channels of the
 * first XOR engine to trigger an XOR transaction that serves as the
 * I/O coherency barrier.
 */

static void __iomem *xor_base, *xor_high_base;
static dma_addr_t coherency_wa_buf_phys[CONFIG_NR_CPUS];
static void *coherency_wa_buf[CONFIG_NR_CPUS];
static bool coherency_wa_enabled;

#define XOR_CONFIG(chan)		(0x10 + ((chan) * 4))
#define XOR_ACTIVATION(chan)		(0x20 + ((chan) * 4))
#define WINDOW_BAR_ENABLE(chan)		(0x240 + ((chan) << 2))
#define WINDOW_BASE(w)			(0x250 + ((w) << 2))
#define WINDOW_SIZE(w)			(0x270 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)		(0x290 + ((w) << 2))
#define WINDOW_OVERRIDE_CTRL(chan)	(0x2A0 + ((chan) << 2))
#define XOR_DEST_POINTER(chan)		(0x2B0 + ((chan) * 4))
#define XOR_BLOCK_SIZE(chan)		(0x2C0 + ((chan) * 4))
#define XOR_INIT_VALUE_LOW		0x2E0
#define XOR_INIT_VALUE_HIGH		0x2E4

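/*
 * These offsets appear to follow the register layout of the
 * Orion/Armada XOR engine (the unit normally driven by the mv_xor DMA
 * driver, matched by "marvell,orion-xor" below); only the subset
 * needed to run a fixed memset-style transaction as a barrier is
 * programmed here.
 */
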
static inline void mvebu_hwcc_armada375_sync_io_barrier_wa(void)
{
	int idx = smp_processor_id();

	/* Write '1' to the first word of the buffer */
	writel(0x1, coherency_wa_buf[idx]);

	/* Wait until the engine is idle */
	while ((readl(xor_base + XOR_ACTIVATION(idx)) >> 4) & 0x3)
		;

	dmb();

	/* Trigger channel */
	writel(0x1, xor_base + XOR_ACTIVATION(idx));

	/* Poll the data until it is cleared by the XOR transaction */
	while (readl(coherency_wa_buf[idx]))
		;
}

static void __init armada_375_coherency_init_wa(void)
{
	const struct mbus_dram_target_info *dram;
	struct device_node *xor_node;
	struct property *xor_status;
	struct clk *xor_clk;
	u32 win_enable = 0;
	int i;

	pr_warn("enabling coherency workaround for Armada 375 Z1, one XOR engine disabled\n");

	/*
	 * Since the workaround uses one XOR engine, we grab a
	 * reference to its Device Tree node first.
	 */
	xor_node = of_find_compatible_node(NULL, NULL, "marvell,orion-xor");
	BUG_ON(!xor_node);

	/*
	 * Then we mark it as disabled so that the real XOR driver
	 * will not use it.
	 */
	xor_status = kzalloc(sizeof(struct property), GFP_KERNEL);
	BUG_ON(!xor_status);

	xor_status->value = kstrdup("disabled", GFP_KERNEL);
	BUG_ON(!xor_status->value);

	/* DT string property lengths include the trailing NUL */
	xor_status->length = sizeof("disabled");
	xor_status->name = kstrdup("status", GFP_KERNEL);
	BUG_ON(!xor_status->name);

	of_update_property(xor_node, xor_status);

	/*
	 * And we remap the registers, get the clock, and do the
	 * initial configuration of the XOR engine.
	 */
	xor_base = of_iomap(xor_node, 0);
	xor_high_base = of_iomap(xor_node, 1);
	BUG_ON(!xor_base || !xor_high_base);

	/* of_clk_get_by_name() returns an ERR_PTR() on failure, not NULL */
	xor_clk = of_clk_get_by_name(xor_node, NULL);
	BUG_ON(IS_ERR(xor_clk));

	clk_prepare_enable(xor_clk);

	dram = mv_mbus_dram_info();

	for (i = 0; i < 8; i++) {
		writel(0, xor_base + WINDOW_BASE(i));
		writel(0, xor_base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, xor_base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, xor_base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, xor_base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, xor_base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, xor_base + WINDOW_BAR_ENABLE(1));
	writel(0, xor_base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, xor_base + WINDOW_OVERRIDE_CTRL(1));

	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		coherency_wa_buf[i] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		BUG_ON(!coherency_wa_buf[i]);

		/*
		 * We can't use the DMA mapping API, since we don't
		 * have a valid 'struct device' pointer
		 */
		coherency_wa_buf_phys[i] =
			virt_to_phys(coherency_wa_buf[i]);
		BUG_ON(!coherency_wa_buf_phys[i]);

		/*
		 * Configure the XOR engine for memset operation, with
		 * a 128-byte block size
		 */
		writel(0x444, xor_base + XOR_CONFIG(i));
		writel(128, xor_base + XOR_BLOCK_SIZE(i));
		writel(coherency_wa_buf_phys[i],
		       xor_base + XOR_DEST_POINTER(i));
	}

	writel(0x0, xor_base + XOR_INIT_VALUE_LOW);
	writel(0x0, xor_base + XOR_INIT_VALUE_HIGH);

	coherency_wa_enabled = true;
}

static inline void mvebu_hwcc_sync_io_barrier(void)
{
	if (coherency_wa_enabled) {
		mvebu_hwcc_armada375_sync_io_barrier_wa();
		return;
	}

	writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
	while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1)
		;
}

static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
					  unsigned long offset, size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				      size_t size, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();
}

static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();
}

static struct dma_map_ops mvebu_hwcc_dma_ops = {
	.alloc = arm_dma_alloc,
	.free = arm_dma_free,
	.mmap = arm_dma_mmap,
	.map_page = mvebu_hwcc_dma_map_page,
	.unmap_page = mvebu_hwcc_dma_unmap_page,
	.get_sgtable = arm_dma_get_sgtable,
	.map_sg = arm_dma_map_sg,
	.unmap_sg = arm_dma_unmap_sg,
	.sync_single_for_cpu = mvebu_hwcc_dma_sync,
	.sync_single_for_device = mvebu_hwcc_dma_sync,
	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device = arm_dma_sync_sg_for_device,
	.set_dma_mask = arm_dma_set_mask,
};

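/*
 * How these ops get used, roughly: the bus notifiers registered below
 * install mvebu_hwcc_dma_ops on each platform and PCI device as it is
 * added, so an ordinary streaming-DMA sequence in a driver, e.g.
 *
 *	dma_addr_t h = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				    DMA_FROM_DEVICE);
 *	...
 *	dma_unmap_page(dev, h, PAGE_SIZE, DMA_FROM_DEVICE);
 *
 * ends up in the hooks above. They only insert the I/O sync barrier
 * for transfers that may have written to memory (i.e. not
 * DMA_TO_DEVICE); the coherency fabric keeps the caches consistent,
 * so no cache maintenance is needed.
 */
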
static int mvebu_hwcc_notifier(struct notifier_block *nb,
			       unsigned long event, void *__dev)
{
	struct device *dev = __dev;

	if (event != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;
	set_dma_ops(dev, &mvebu_hwcc_dma_ops);

	return NOTIFY_OK;
}

static struct notifier_block mvebu_hwcc_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};

/* Only registered when CONFIG_PCI is enabled, hence __maybe_unused */
static struct notifier_block mvebu_hwcc_pci_nb __maybe_unused = {
	.notifier_call = mvebu_hwcc_notifier,
};

static void __init armada_370_coherency_init(struct device_node *np)
{
	struct resource res;

	of_address_to_resource(np, 0, &res);
	coherency_phys_base = res.start;
	/*
	 * Ensure secondary CPUs will see the updated value,
	 * which they read before they join the coherency
	 * fabric, and therefore before they are coherent with
	 * the boot CPU cache.
	 */
	sync_cache_w(&coherency_phys_base);
	coherency_base = of_iomap(np, 0);
	coherency_cpu_base = of_iomap(np, 1);
	set_cpu_coherent();
}

/*
 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
 * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
 * is needed as a workaround for a deadlock issue between the PCIe
 * interface and the cache controller.
 */
static void __iomem *
armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
			      unsigned int mtype, void *caller)
{
	struct resource pcie_mem;

	mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);

	if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
		mtype = MT_UNCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}

static void __init armada_375_380_coherency_init(struct device_node *np)
{
	struct device_node *cache_dn;

	coherency_cpu_base = of_iomap(np, 0);
	arch_ioremap_caller = armada_pcie_wa_ioremap_caller;

	/*
	 * Add the PL310 property "arm,io-coherent". This makes sure the
	 * outer sync operation is not used, which makes it possible to
	 * work around the system erratum that causes deadlocks when
	 * doing PCIe in an SMP situation on Armada 375 and Armada
	 * 38x.
	 */
	for_each_compatible_node(cache_dn, NULL, "arm,pl310-cache") {
		struct property *p;

		p = kzalloc(sizeof(*p), GFP_KERNEL);
		BUG_ON(!p);
		p->name = kstrdup("arm,io-coherent", GFP_KERNEL);
		of_add_property(cache_dn, p);
	}
}

static int coherency_type(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	int type;

	/*
	 * The coherency fabric is needed:
	 * - For coherency between processors on Armada XP, so only
	 *   when SMP is enabled.
	 * - For coherency between the processor and I/O devices, but
	 *   this coherency requires many prerequisites (write
	 *   allocate cache policy, shareable pages, SMP bit set) that
	 *   are only met in SMP situations.
	 *
	 * Note that this means that on Armada 370, there is currently
	 * no way to use hardware I/O coherency, because even when
	 * CONFIG_SMP is enabled, is_smp() returns false due to the
	 * Armada 370 being a single-core processor. To lift this
	 * limitation, we would have to find a way to set the cache
	 * policy to write-allocate (on all Armada SoCs), and to set
	 * the shareable attribute in page tables (on all Armada SoCs
	 * except the Armada 370). Unfortunately, such decisions are
	 * taken very early in the kernel boot process, at a point
	 * where we don't know yet on which SoC we are running.
	 */
	if (!is_smp())
		return COHERENCY_FABRIC_TYPE_NONE;

	np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
	if (!np)
		return COHERENCY_FABRIC_TYPE_NONE;

	type = (int) match->data;

	of_node_put(np);

	return type;
}

int coherency_available(void)
{
	return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
}

int __init coherency_init(void)
{
	int type = coherency_type();
	struct device_node *np;

	np = of_find_matching_node(NULL, of_coherency_table);

	if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
		armada_370_coherency_init(np);
	else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 ||
		 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
		armada_375_380_coherency_init(np);

	of_node_put(np);

	return 0;
}

static int __init coherency_late_init(void)
{
	int type = coherency_type();

	if (type == COHERENCY_FABRIC_TYPE_NONE)
		return 0;

	if (type == COHERENCY_FABRIC_TYPE_ARMADA_375) {
		u32 dev, rev;

		if (mvebu_get_soc_id(&dev, &rev) == 0 &&
		    rev == ARMADA_375_Z1_REV)
			armada_375_coherency_init_wa();
	}

	bus_register_notifier(&platform_bus_type,
			      &mvebu_hwcc_nb);

	return 0;
}

postcore_initcall(coherency_late_init);

#if IS_ENABLED(CONFIG_PCI)
static int __init coherency_pci_init(void)
{
	if (coherency_available())
		bus_register_notifier(&pci_bus_type,
				      &mvebu_hwcc_pci_nb);
	return 0;
}

arch_initcall(coherency_pci_init);
#endif