/*
 * Driver for Marvell NETA network controller Buffer Manager.
 *
 * Copyright (C) 2015 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"

#define MVNETA_BM_DRIVER_NAME "mvneta_bm"
#define MVNETA_BM_DRIVER_VERSION "1.0"

static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data)
{
	writel(data, priv->reg_base + offset);
}

static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset)
{
	return readl(priv->reg_base + offset);
}

static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val |= MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
}

static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val &= ~MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
}

static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val |= mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val &= ~mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

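/* Configure the MBUS cross-bar target ID and attributes used by a pool */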
static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
				      u8 target_id, u8 attr)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id));
	val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id);
	val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id);
	val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id);
	val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr);

	mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
}

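/* hwbm "construct" callback: map a freshly allocated buffer for DMA and
 * hand its physical address over to the hardware pool
 */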
int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
{
	struct mvneta_bm_pool *bm_pool =
		(struct mvneta_bm_pool *)hwbm_pool->priv;
	struct mvneta_bm *priv = bm_pool->priv;
	dma_addr_t phys_addr;

	/* In order to update buf_cookie field of RX descriptor properly,
	 * BM hardware expects buf virtual address to be placed in the
	 * first four bytes of mapped buffer.
	 */
	*(u32 *)buf = (u32)buf;
	phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
		return -ENOMEM;

	mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
	return 0;
}
EXPORT_SYMBOL_GPL(mvneta_bm_construct);

/* Create pool */
static int mvneta_bm_pool_create(struct mvneta_bm *priv,
				 struct mvneta_bm_pool *bm_pool)
{
	struct platform_device *pdev = priv->pdev;
	u8 target_id, attr;
	int size_bytes, err;

	size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id,
					   &attr);
	if (err < 0) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		return err;
	}

	/* Set pool address */
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id),
			bm_pool->phys_addr);

	mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr);
	mvneta_bm_pool_enable(priv, bm_pool->id);

	return 0;
}

/* Notify the driver that a BM pool is being used as a specific type and return
 * the pool pointer on success
 */
struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
					  enum mvneta_bm_type type, u8 port_id,
					  int pkt_size)
{
	struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id];
	int num, err;

	if (new_pool->type == MVNETA_BM_LONG &&
	    new_pool->port_map != 1 << port_id) {
		dev_err(&priv->pdev->dev,
			"long pool cannot be shared by the ports\n");
		return NULL;
	}

	if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) {
		dev_err(&priv->pdev->dev,
			"mixing pools' types between the ports is forbidden\n");
		return NULL;
	}

	if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT)
		new_pool->pkt_size = pkt_size;

	/* Allocate buffers in case BM pool hasn't been used yet */
	if (new_pool->type == MVNETA_BM_FREE) {
		struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;

		new_pool->priv = priv;
		new_pool->type = type;
		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
		hwbm_pool->frag_size =
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		hwbm_pool->construct = mvneta_bm_construct;
		hwbm_pool->priv = new_pool;

		/* Create new pool */
		err = mvneta_bm_pool_create(priv, new_pool);
		if (err) {
			dev_err(&priv->pdev->dev, "fail to create pool %d\n",
				new_pool->id);
			return NULL;
		}

		/* Allocate buffers for this pool */
		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
		if (num != hwbm_pool->size) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, hwbm_pool->size);
			return NULL;
		}
	}

	return new_pool;
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_use);

/* Free all buffers from the pool */
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
			 u8 port_map)
{
	int i;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
		dma_addr_t buf_phys_addr;
		u32 *vaddr;

		/* Get buffer physical address (indirect access) */
		buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool);

		/* Work around a problem seen while destroying the pool:
		 * a read access to the BPPI can return 0.
		 */
		if (buf_phys_addr == 0)
			continue;

		vaddr = phys_to_virt(buf_phys_addr);
		if (!vaddr)
			break;

		dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
	}

	mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->hwbm_pool.buf_num -= i;
}
EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);

/* Cleanup pool */
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
			    struct mvneta_bm_pool *bm_pool, u8 port_map)
{
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	bm_pool->type = MVNETA_BM_FREE;

	mvneta_bm_bufs_free(priv, bm_pool, port_map);
	if (hwbm_pool->buf_num)
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);

	if (bm_pool->virt_addr) {
		dma_free_coherent(&priv->pdev->dev,
				  sizeof(u32) * hwbm_pool->size,
				  bm_pool->virt_addr, bm_pool->phys_addr);
		bm_pool->virt_addr = NULL;
	}

	mvneta_bm_pool_disable(priv, bm_pool->id);
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy);

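/* Start the BM unit and set up every pool with its capacity (taken from DT
 * or defaults) and optional per-pool packet size
 */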
static void mvneta_bm_pools_init(struct mvneta_bm *priv)
{
	struct device_node *dn = priv->pdev->dev.of_node;
	struct mvneta_bm_pool *bm_pool;
	char prop[15];
	u32 size;
	int i;

	/* Activate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK);

	/* Create all pools with maximum size */
	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		bm_pool->type = MVNETA_BM_FREE;

		/* Reset read pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0);

		/* Reset write pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0);

		/* Configure pool size according to DT or use default value */
		sprintf(prop, "pool%d,capacity", i);
		if (of_property_read_u32(dn, prop, &size)) {
			size = MVNETA_BM_POOL_CAP_DEF;
		} else if (size > MVNETA_BM_POOL_CAP_MAX) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MAX);
			size = MVNETA_BM_POOL_CAP_MAX;
		} else if (size < MVNETA_BM_POOL_CAP_MIN) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MIN);
			size = MVNETA_BM_POOL_CAP_MIN;
		} else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, round to %d\n",
				 i, size, ALIGN(size,
				 MVNETA_BM_POOL_CAP_ALIGN));
			size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
		}
		bm_pool->hwbm_pool.size = size;

		mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
				bm_pool->hwbm_pool.size);

		/* Obtain custom pkt_size from DT */
		sprintf(prop, "pool%d,pkt-size", i);
		if (of_property_read_u32(dn, prop, &bm_pool->pkt_size))
			bm_pool->pkt_size = 0;
	}
}

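/* Apply the default BM configuration: mask and clear interrupts, then limit
 * the maximum in-burst size
 */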
static void mvneta_bm_default_set(struct mvneta_bm *priv)
{
	u32 val;

	/* Mask all BM interrupts */
	mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);

	/* Set BM configuration register */
	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);

	/* Reduce MaxInBurstSize from 32 BPs to 16 BPs */
	val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK;
	val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static int mvneta_bm_init(struct mvneta_bm *priv)
{
	mvneta_bm_default_set(priv);

	/* Allocate and initialize BM pool structures */
	priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM,
				      sizeof(struct mvneta_bm_pool),
				      GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	mvneta_bm_pools_init(priv);

	return 0;
}

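/* Reserve the internal buffer pointers (BPPI) memory from the SRAM gen_pool */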
static int mvneta_bm_get_sram(struct device_node *dn,
			      struct mvneta_bm *priv)
{
	priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0);
	if (!priv->bppi_pool)
		return -ENOMEM;

	priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool,
						  MVNETA_BM_BPPI_SIZE,
						  &priv->bppi_phys_addr);
	if (!priv->bppi_virt_addr)
		return -ENOMEM;

	return 0;
}

static void mvneta_bm_put_sram(struct mvneta_bm *priv)
{
	gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr,
		      MVNETA_BM_BPPI_SIZE);
}

static int mvneta_bm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct mvneta_bm *priv;
	struct resource *res;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->reg_base))
		return PTR_ERR(priv->reg_base);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	err = clk_prepare_enable(priv->clk);
	if (err < 0)
		return err;

	err = mvneta_bm_get_sram(dn, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to allocate internal memory\n");
		goto err_clk;
	}

	priv->pdev = pdev;

	/* Initialize buffer manager internals */
	err = mvneta_bm_init(priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_sram;
	}

	dn->data = priv;
	platform_set_drvdata(pdev, priv);

	dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n");

	return 0;

err_sram:
	mvneta_bm_put_sram(priv);
err_clk:
	clk_disable_unprepare(priv->clk);
	return err;
}

static int mvneta_bm_remove(struct platform_device *pdev)
{
	struct mvneta_bm *priv = platform_get_drvdata(pdev);
	u8 all_ports_map = 0xff;
	int i = 0;

	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i];

		mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map);
	}

	mvneta_bm_put_sram(priv);

	/* Deactivate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id mvneta_bm_match[] = {
	{ .compatible = "marvell,armada-380-neta-bm" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_bm_match);

static struct platform_driver mvneta_bm_driver = {
	.probe = mvneta_bm_probe,
	.remove = mvneta_bm_remove,
	.driver = {
		.name = MVNETA_BM_DRIVER_NAME,
		.of_match_table = mvneta_bm_match,
	},
};

module_platform_driver(mvneta_bm_driver);

MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");