/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10

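/*
 * Roughly speaking, this file drives two register blocks: the per-ring
 * registers reached through a struct hnae_queue, and the shared "rcb common"
 * block of a DSAF instance reached through a struct rcb_common_cb.
 */
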
/**
 *hns_rcb_wait_fbd_clean - wait until the queues' fbds are drained
 *@qs: ring struct pointer array
 *@q_num: number of queues in the array
 *@flag: tx or rx flag
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
	int i, wait_cnt;
	u32 fbd_num;

	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
		usleep_range(200, 300);

		fbd_num = 0;
		if (flag & RCB_INT_FLAG_TX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_TX_RING_FBDNUM_REG);
		if (flag & RCB_INT_FLAG_RX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_RX_RING_FBDNUM_REG);
		if (!fbd_num)
			i++;
		if (wait_cnt >= 10000)
			break;
	}

	if (i < q_num)
		dev_err(qs[i]->handle->owner_dev,
			"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}

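/*
 * The ring reset below looks like a handshake with the hardware: software
 * requests a reset via RCB_RING_T0_BE_RST and then polls
 * RCB_RING_COULD_BE_RST, retoggling the request until the ring reports it
 * may be reset or the retry budget is exhausted.
 */
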
/**
 *hns_rcb_reset_ring_hw - ring reset
 *@q: ring struct pointer
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;
	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
			wait_cnt++;
		}

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}

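/*
 * For the interrupt control helpers below, a non-zero @mask is normalized to
 * 1 and written to the INTMSK registers; writing 1 presumably masks
 * (disables) the corresponding ring interrupt, writing 0 unmasks it.
 */
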
/**
 *hns_rcb_int_ctrl_hw - rcb irq enable control
 *@q: hnae queue struct pointer
 *@flag:ring flag tx or rx
 *@mask:mask
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
			       int_mask_en);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
			       int_mask_en);
	}
}

void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
	}
}

void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}

void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}

/**
 *hns_rcb_ring_enable_hw - enable ring
 *@q: rcb ring queue
 *@val: non-zero enables ring prefetch, zero disables it
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}

/**
 *hns_rcb_common_init_commit_hw - make rcb common init completed
 *@rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb();	/* Sync point before breakpoint */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb();	/* Sync point after breakpoint */
}

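/*
 * In hns_rcb_ring_init() below, the high half of the descriptor DMA address
 * is written as (dma >> 31) >> 1 rather than dma >> 32: this still yields
 * the upper 32 bits on a 64-bit dma_addr_t, but avoids an undefined 32-bit
 * shift when dma_addr_t happens to be 32 bits wide.
 */
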
/**
 *hns_rcb_ring_init - init rcb ring
 *@ring_pair: ring pair control block
 *@ring_type: ring type, RX_RING or TX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
	u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	}
}

/**
 *hns_rcb_init_hw - init rcb hardware
 *@ring: ring pair control block
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}

/**
 *hns_rcb_set_port_desc_cnt - set rcb port description num
 *@rcb_common: rcb_common device
 *@port_idx:port index
 *@desc_cnt:BD num
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
				      u32 port_idx, u32 desc_cnt)
{
	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
		       desc_cnt);
}

static void hns_rcb_set_port_timeout(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
			       timeout * HNS_RCB_CLK_FREQ_MHZ);
	else
		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
}

static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return HNS_RCB_SERVICE_NW_ENGINE_NUM;

	return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}

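/*
 * In the helper below, writing all-ones to the *_STS/RINT registers clears
 * any latched exception sources, while the mask registers get 0 to enable
 * the interrupts (@en != 0) or all-ones to keep them masked (@en == 0).
 */
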
/* clear and mask the rcb common exception irqs */
static void hns_rcb_comm_exc_irq_en(
			struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_vlue = 0xfffffffful;
	u32 msk_vlue = en ? 0 : 0xfffffffful;

	/* clear the latched interrupt sources */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);

	/* enable or mask the interrupts */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);

	/* tx bd does not need the cacheline irq, so mask
	 * sf_txring_fbd_intmask (bit 1)
	 */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
}

/**
 *hns_rcb_common_init_hw - init rcb common hardware
 *@rcb_common: rcb_common device
 *return 0 - success, negative - fail
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
	u32 reg_val;
	int i;
	int port_num = hns_rcb_common_get_port_num(rcb_common);

	hns_rcb_comm_exc_irq_en(rcb_common, 0);

	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
	if (0x1 != (reg_val & 0x1)) {
		dev_err(rcb_common->dsaf_dev->dev,
			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
		return -EBUSY;
	}

	for (i = 0; i < port_num; i++) {
		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
		(void)hns_rcb_set_coalesced_frames(
			rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
		hns_rcb_set_port_timeout(
			rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
	}

	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
		       HNS_RCB_COMMON_ENDIAN);

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
	} else {
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FNA_B, false);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FA_B, true);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
				 RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
	}

	return 0;
}

int hns_rcb_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = -EINVAL;
	}

	return bd_size_type;
}

static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u32 buf_size;
	u16 desc_num, mdnum_ppkt;
	bool irq_idx, is_ver1;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	} else {
		ring = &q->tx_ring;
		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
				HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
	}

	rcb_common = ring_pair_cb->rcb_common;
	buf_size = rcb_common->dsaf_dev->buf_size;
	desc_num = rcb_common->dsaf_dev->desc_num;

	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = buf_size;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = mdnum_ppkt;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}

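/*
 * Ring-to-port mapping: each port in a common block owns a contiguous span of
 * max_vfn * max_q_per_vf rings, so dividing the ring index by that span size
 * recovers the port index within the block.
 */
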
static int hns_rcb_get_port_in_comm(
	struct rcb_common_cb *rcb_common, int ring_idx)
{
	return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}

#define SERVICE_RING_IRQ_IDX(v1) \
	((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return SERVICE_RING_IRQ_IDX(is_ver1);

	return HNS_DEBUG_RING_IRQ_IDX;
}

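/*
 * IRQ layout differs by version (see hns_rcb_get_cfg() below): v1 exposes
 * two vectors per ring pair (TX first, then RX), while v2 appears to reserve
 * three per ring pair with RX at offset 0 and TX at offset 1.
 */
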
#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
/**
 *hns_rcb_get_cfg - get rcb config
 *@rcb_common: rcb common device
 */
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct platform_device *pdev =
		to_platform_device(rcb_common->dsaf_dev->dev);
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_comm =
			hns_rcb_get_port_in_comm(rcb_common, i);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
			is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
			platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
			is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
			platform_get_irq(pdev, base_irq_idx + i * 3);
		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}
}

/**
 *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *
 *Returns: coalesced_frames
 */
u32 hns_rcb_get_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}

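/*
 * Coalesce timeout handling is version dependent: v1 has a single global
 * RCB_CFG_OVERTIME_REG programmed in clock cycles (hence the
 * HNS_RCB_CLK_FREQ_MHZ scaling), while v2 has one RCB_PORT_CFG_OVERTIME_REG
 * per port, written directly in microseconds.
 */
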
/**
 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *
 *Returns: time_out
 */
u32 hns_rcb_get_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
		       HNS_RCB_CLK_FREQ_MHZ;

	return dsaf_read_dev(rcb_common,
			     RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}

/**
 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *@timeout:tx/rx time for coalesced time_out
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);

	if (timeout == old_timeout)
		return 0;

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
			dev_err(rcb_common->dsaf_dev->dev,
				"error: coalesce_usecs setting is not supported!\n");
			return -EINVAL;
		}
	}
	if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: coalesce timeout of %dus is not supported!\n",
			timeout);
		return -EINVAL;
	}

	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
	return 0;
}

/**
 *hns_rcb_set_coalesced_frames - set rcb coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *@coalesced_frames:tx/rx BD num for coalesced frames
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames >= rcb_common->desc_num ||
	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
	    coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: coalesce_frames setting is not supported!\n");
		return -EINVAL;
	}

	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
		       coalesced_frames);
	return 0;
}

/**
 *hns_rcb_get_queue_mode - get max VM number and max ring number per VM
 * according to dsaf mode
 *@dsaf_mode: dsaf mode
 *@max_vfn: max vfn number
 *@max_q_per_vf: max ring number per vm
 */
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
			    u16 *max_q_per_vf)
{
	switch (dsaf_mode) {
	case DSAF_MODE_DISABLE_6PORT_0VM:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	case DSAF_MODE_DISABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		*max_vfn = 1;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_2PORT_64VM:
		*max_vfn = 64;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_6PORT_16VM:
		*max_vfn = 16;
		*max_q_per_vf = 1;
		break;
	default:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	}
}

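/*
 * The ring counts returned below are consistent with
 * hns_rcb_get_queue_mode(): the total number of ring pairs is roughly
 * ports * max_vfn * max_q_per_vf for the given DSAF mode.
 */
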
int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
	switch (dsaf_dev->dsaf_mode) {
	case DSAF_MODE_ENABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		return 1;

	case DSAF_MODE_DISABLE_FIX:
		return 6;

	case DSAF_MODE_ENABLE_0VM:
		return 32;

	case DSAF_MODE_DISABLE_6PORT_0VM:
	case DSAF_MODE_ENABLE_16VM:
	case DSAF_MODE_DISABLE_6PORT_2VM:
	case DSAF_MODE_DISABLE_6PORT_16VM:
	case DSAF_MODE_DISABLE_6PORT_4VM:
	case DSAF_MODE_ENABLE_8VM:
		return 96;

	case DSAF_MODE_DISABLE_2PORT_16VM:
	case DSAF_MODE_DISABLE_2PORT_8VM:
	case DSAF_MODE_ENABLE_32VM:
	case DSAF_MODE_DISABLE_2PORT_64VM:
	case DSAF_MODE_ENABLE_128VM:
		return 128;

	default:
		dev_warn(dsaf_dev->dev,
			 "get ring num fail, use default! dsaf_mode=%d\n",
			 dsaf_dev->dsaf_mode);
		return 128;
	}
}

void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}

static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}

int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
			   int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev);

	rcb_common =
		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
			ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
	if (!rcb_common) {
		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
		return -ENOMEM;
	}
	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;

	hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
	rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);

	dsaf_dev->rcb_common[comm_index] = rcb_common;
	return 0;
}

void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}

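/**
 *hns_rcb_update_stats - update rcb hardware statistics from the ring and
 * ppe packet counters
 *@queue: rcb queue
 */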
void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	hw_stats->rx_pkts += dsaf_read_dev(queue,
			RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}

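/*
 * The layout written by hns_rcb_get_stats() below mirrors the string table
 * built in hns_rcb_get_strings(): entries 0-11 cover the TX side (hardware
 * counters first, then software ring stats) and entries 12-27 the RX side.
 */
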
/**
 *hns_rcb_get_stats - get rcb statistic
 *@queue: rcb ring queue
 *@data:statistic value
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
	u64 *regs_buff = data;
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	regs_buff[0] = hw_stats->tx_pkts;
	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
	regs_buff[3] =
		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
	regs_buff[10] = queue->tx_ring.stats.restart_queue;
	regs_buff[11] = queue->tx_ring.stats.tx_busy;

	regs_buff[12] = hw_stats->rx_pkts;
	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
	regs_buff[15] =
		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
	regs_buff[26] = queue->rx_ring.stats.l2_err;
	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}

/**
 *hns_rcb_get_ring_sset_count - rcb string set count
 *@stringset:ethtool cmd
 *return rcb ring string set count
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS)
		return HNS_RING_STATIC_REG_NUM;

	return 0;
}

/**
 *hns_rcb_get_common_regs_count - rcb common regs count
 *return regs count
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}

/**
 *hns_rcb_get_ring_regs_count - rcb ring regs count
 *return regs count
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}

/**
 *hns_rcb_get_strings - get rcb string set
 *@stringset:string set index
 *@data:strings name value
 *@index:queue index
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
	char *buff = (char *)data;

	if (stringset != ETH_SS_STATS)
		return;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
}

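/*
 * Register dump layout used below: regs[0..37] are the rcb common registers,
 * regs[38..53] the per-port BD number registers, regs[54..69] the per-port
 * pktline registers, regs[70..75] the overtime registers, regs[76..77] the
 * pktline/overtime interrupt numbers, and regs[78..79] are 0xcccccccc end
 * markers.
 */
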
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
	bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
	u32 reg_tmp;
	u32 reg_num_tmp;
	u32 i = 0;

	/*rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry registers */
	for (i = 0; i < 16; i++) { /* total 16 model registers */
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
	reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
	for (i = 0; i < reg_num_tmp; i++)
		regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);

	regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 78; i < 80; i++)
		regs[i] = 0xcccccccc;
}

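/*
 * Per-ring dump layout used below: regs[0..8] are RX ring registers,
 * regs[9..19] TX ring registers (index 14 is left unused), regs[20..34]
 * ring control and interrupt registers, and regs[35..39] end markers tagged
 * with the ring index.
 */
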
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
	u32 *regs = data;
	struct ring_pair_cb *ring_pair
		= container_of(queue, struct ring_pair_cb, q);
	u32 i = 0;

	/*rcb ring registers */
	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

	/* mark end of ring regs */
	for (i = 35; i < 40; i++)
		regs[i] = 0xcccccc00 + ring_pair->index;
}