/*
 * Driver for Marvell NETA network controller Buffer Manager.
 *
 * Copyright (C) 2015 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/genalloc.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk.h>
#include "mvneta_bm.h"

#define MVNETA_BM_DRIVER_NAME "mvneta_bm"
#define MVNETA_BM_DRIVER_VERSION "1.0"

static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data)
{
	writel(data, priv->reg_base + offset);
}

static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset)
{
	return readl(priv->reg_base + offset);
}

static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val |= MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
}

static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val &= ~MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
}

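/* Read-modify-write helpers for MVNETA_BM_CONFIG_REG. They are used below by
 * mvneta_bm_bufs_free() to set and then clear MVNETA_BM_EMPTY_LIMIT_MASK
 * while a pool is being drained.
 */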
static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val |= mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val &= ~mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

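/* Route the pool's DMA accesses through the MBus crossbar: target_id and attr
 * identify the DRAM window holding the pool's buffer-pointer memory, as
 * reported by mvebu_mbus_get_dram_win_info() in mvneta_bm_pool_create().
 */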
static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
				      u8 target_id, u8 attr)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id));
	val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id);
	val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id);
	val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id);
	val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr);

	mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
}

/* Allocate skb for BM pool */
void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
		       dma_addr_t *buf_phys_addr)
{
	void *buf;
	dma_addr_t phys_addr;

	buf = mvneta_frag_alloc(bm_pool->frag_size);
	if (!buf)
		return NULL;

	/* In order to update buf_cookie field of RX descriptor properly,
	 * BM hardware expects buf virtual address to be placed in the
	 * first four bytes of mapped buffer.
	 */
	*(u32 *)buf = (u32)buf;
	phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) {
		mvneta_frag_free(bm_pool->frag_size, buf);
		return NULL;
	}
	*buf_phys_addr = phys_addr;

	return buf;
}

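/* Note: the (u32) casts above assume a virtual address fits in 32 bits, so
 * the cookie stored in the buffer can hold a full kernel pointer. This holds
 * on the 32-bit Armada SoCs this driver targets.
 */
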
/* Refill processing for HW buffer management */
int mvneta_bm_pool_refill(struct mvneta_bm *priv,
			  struct mvneta_bm_pool *bm_pool)
{
	dma_addr_t buf_phys_addr;
	void *buf;

	buf = mvneta_buf_alloc(priv, bm_pool, &buf_phys_addr);
	if (!buf)
		return -ENOMEM;

	mvneta_bm_pool_put_bp(priv, bm_pool, buf_phys_addr);

	return 0;
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_refill);

/* Allocate buffers for the pool */
int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
		       int buf_num)
{
	int err, i;

	if (bm_pool->buf_num == bm_pool->size) {
		dev_dbg(&priv->pdev->dev, "pool %d already filled\n",
			bm_pool->id);
		return bm_pool->buf_num;
	}

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		dev_err(&priv->pdev->dev,
			"cannot allocate %d buffers for pool %d\n",
			buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		err = mvneta_bm_pool_refill(priv, bm_pool);
		if (err < 0)
			break;
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	dev_dbg(&priv->pdev->dev,
		"%s pool %d: pkt_size=%4d, buf_size=%4d, frag_size=%4d\n",
		bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
		bm_pool->id, bm_pool->pkt_size, bm_pool->buf_size,
		bm_pool->frag_size);

	dev_dbg(&priv->pdev->dev,
		"%s pool %d: %d of %d buffers added\n",
		bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
		bm_pool->id, i, buf_num);

	return i;
}
EXPORT_SYMBOL_GPL(mvneta_bm_bufs_add);

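/* The pool memory itself is an array of bm_pool->size 32-bit buffer pointers
 * (hence the sizeof(u32) * size allocation below) that the BM unit reads and
 * writes by DMA, so it is allocated coherently and checked for the alignment
 * the hardware requires.
 */
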
/* Create pool */
static int mvneta_bm_pool_create(struct mvneta_bm *priv,
				 struct mvneta_bm_pool *bm_pool)
{
	struct platform_device *pdev = priv->pdev;
	u8 target_id, attr;
	int size_bytes, err;

	size_bytes = sizeof(u32) * bm_pool->size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id,
					   &attr);
	if (err < 0) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		return err;
	}

	/* Set pool address */
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id),
			bm_pool->phys_addr);

	mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr);
	mvneta_bm_pool_enable(priv, bm_pool->id);

	return 0;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
					  enum mvneta_bm_type type, u8 port_id,
					  int pkt_size)
{
	struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id];
	int num, err;

	if (new_pool->type == MVNETA_BM_LONG &&
	    new_pool->port_map != 1 << port_id) {
		dev_err(&priv->pdev->dev,
			"long pool cannot be shared by the ports\n");
		return NULL;
	}

	if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) {
		dev_err(&priv->pdev->dev,
			"mixing pools' types between the ports is forbidden\n");
		return NULL;
	}

	if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT)
		new_pool->pkt_size = pkt_size;

	/* Allocate buffers in case BM pool hasn't been used yet */
	if (new_pool->type == MVNETA_BM_FREE) {
		new_pool->type = type;
		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		/* Create new pool */
		err = mvneta_bm_pool_create(priv, new_pool);
		if (err < 0) {
			dev_err(&priv->pdev->dev, "fail to create pool %d\n",
				new_pool->id);
			return NULL;
		}

		/* Allocate buffers for this pool */
		num = mvneta_bm_bufs_add(priv, new_pool, new_pool->size);
		if (num != new_pool->size) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, new_pool->size);
			return NULL;
		}
	}

	return new_pool;
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_use);

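/* Pools are reference-counted with a port bitmap: each port using a pool has
 * its bit set in bm_pool->port_map (the mvneta port driver sets it), and the
 * free/destroy paths below only tear a pool down once the last port has
 * cleared its bit.
 */
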
/* Free all buffers from the pool */
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
			 u8 port_map)
{
	int i;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	for (i = 0; i < bm_pool->buf_num; i++) {
		dma_addr_t buf_phys_addr;
		u32 *vaddr;

		/* Get buffer physical address (indirect access) */
		buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool);

		/* Work-around to the problems when destroying the pool,
		 * when it occurs that a read access to BPPI returns 0.
		 */
		if (buf_phys_addr == 0)
			continue;

		vaddr = phys_to_virt(buf_phys_addr);
		if (!vaddr)
			break;

		dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		mvneta_frag_free(bm_pool->frag_size, vaddr);
	}

	mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);

/* Cleanup pool */
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
			    struct mvneta_bm_pool *bm_pool, u8 port_map)
{
	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	bm_pool->type = MVNETA_BM_FREE;

	mvneta_bm_bufs_free(priv, bm_pool, port_map);
	if (bm_pool->buf_num)
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);

	if (bm_pool->virt_addr) {
		dma_free_coherent(&priv->pdev->dev, sizeof(u32) * bm_pool->size,
				  bm_pool->virt_addr, bm_pool->phys_addr);
		bm_pool->virt_addr = NULL;
	}

	mvneta_bm_pool_disable(priv, bm_pool->id);
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy);

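/* Per-pool defaults may be overridden from the device tree via the
 * "pool%d,capacity" and "pool%d,pkt-size" properties parsed below; capacity
 * is clamped to [MVNETA_BM_POOL_CAP_MIN, MVNETA_BM_POOL_CAP_MAX] and rounded
 * up to MVNETA_BM_POOL_CAP_ALIGN.
 */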
static void mvneta_bm_pools_init(struct mvneta_bm *priv)
{
	struct device_node *dn = priv->pdev->dev.of_node;
	struct mvneta_bm_pool *bm_pool;
	char prop[15];
	u32 size;
	int i;

	/* Activate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK);

	/* Create all pools with maximum size */
	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		bm_pool->type = MVNETA_BM_FREE;

		/* Reset read pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0);

		/* Reset write pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0);

		/* Configure pool size according to DT or use default value */
		sprintf(prop, "pool%d,capacity", i);
		if (of_property_read_u32(dn, prop, &size)) {
			size = MVNETA_BM_POOL_CAP_DEF;
		} else if (size > MVNETA_BM_POOL_CAP_MAX) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MAX);
			size = MVNETA_BM_POOL_CAP_MAX;
		} else if (size < MVNETA_BM_POOL_CAP_MIN) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MIN);
			size = MVNETA_BM_POOL_CAP_MIN;
		} else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, round to %d\n",
				 i, size,
				 ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN));
			size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
		}
		bm_pool->size = size;

		mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
				bm_pool->size);

		/* Obtain custom pkt_size from DT */
		sprintf(prop, "pool%d,pkt-size", i);
		if (of_property_read_u32(dn, prop, &bm_pool->pkt_size))
			bm_pool->pkt_size = 0;
	}
}

static void mvneta_bm_default_set(struct mvneta_bm *priv)
{
	u32 val;

	/* Mask BM all interrupts */
	mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);

	/* Set BM configuration register */
	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);

	/* Reduce MaxInBurstSize from 32 BPs to 16 BPs */
	val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK;
	val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static int mvneta_bm_init(struct mvneta_bm *priv)
{
	mvneta_bm_default_set(priv);

	/* Allocate and initialize BM pools structures */
	priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM,
				      sizeof(struct mvneta_bm_pool),
				      GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	mvneta_bm_pools_init(priv);

	return 0;
}

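/* The BM unit keeps its internal buffer-pointer memory (BPPI) in on-chip
 * SRAM. The SRAM is exposed as a genalloc pool via the "internal-mem" phandle
 * in the device tree, and a MVNETA_BM_BPPI_SIZE chunk of it is reserved here
 * for the lifetime of the driver.
 */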
static int mvneta_bm_get_sram(struct device_node *dn,
			      struct mvneta_bm *priv)
{
	priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0);
	if (!priv->bppi_pool)
		return -ENOMEM;

	priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool,
						  MVNETA_BM_BPPI_SIZE,
						  &priv->bppi_phys_addr);
	if (!priv->bppi_virt_addr)
		return -ENOMEM;

	return 0;
}

static void mvneta_bm_put_sram(struct mvneta_bm *priv)
{
	gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr,
		      MVNETA_BM_BPPI_SIZE);
}

static int mvneta_bm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct mvneta_bm *priv;
	struct resource *res;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->reg_base))
		return PTR_ERR(priv->reg_base);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);
	err = clk_prepare_enable(priv->clk);
	if (err < 0)
		return err;

	err = mvneta_bm_get_sram(dn, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to allocate internal memory\n");
		goto err_clk;
	}

	priv->pdev = pdev;

	/* Initialize buffer manager internals */
	err = mvneta_bm_init(priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_sram;
	}

	dn->data = priv;
	platform_set_drvdata(pdev, priv);

	dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n");

	return 0;

err_sram:
	mvneta_bm_put_sram(priv);
err_clk:
	clk_disable_unprepare(priv->clk);
	return err;
}

static int mvneta_bm_remove(struct platform_device *pdev)
{
	struct mvneta_bm *priv = platform_get_drvdata(pdev);
	u8 all_ports_map = 0xff;
	int i;

	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i];

		mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map);
	}

	mvneta_bm_put_sram(priv);

	/* Deactivate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id mvneta_bm_match[] = {
	{ .compatible = "marvell,armada-380-neta-bm" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mvneta_bm_match);

static struct platform_driver mvneta_bm_driver = {
	.probe = mvneta_bm_probe,
	.remove = mvneta_bm_remove,
	.driver = {
		.name = MVNETA_BM_DRIVER_NAME,
		.of_match_table = mvneta_bm_match,
	},
};

module_platform_driver(mvneta_bm_driver);

MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");