/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10
/**
 *hns_rcb_wait_fbd_clean - clean fbd
 *@qs: ring struct pointer array
 *@q_num: number of queues in the array
 *@flag: tx or rx flag
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
	int i, wait_cnt;
	u32 fbd_num = 0;

	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
		usleep_range(200, 300);

		fbd_num = 0;
		if (flag & RCB_INT_FLAG_TX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_TX_RING_FBDNUM_REG);
		if (flag & RCB_INT_FLAG_RX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_RX_RING_FBDNUM_REG);

		/* move on to the next queue once this one has drained */
		if (!fbd_num)
			i++;
		if (wait_cnt >= 10000)
			break;
	}

	if (i < q_num)
		dev_err(qs[i]->handle->owner_dev,
			"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}
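
/* Usage sketch (hypothetical caller, for illustration only): before tearing a
 * port down, a caller holding an hnae handle would typically drain both
 * directions of every queue it owns, e.g.
 *
 *	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num,
 *			       RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
 *
 * so that no free buffer descriptors are still outstanding in hardware.
 */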
/**
 *hns_rcb_reset_ring_hw - ring reset
 *@q: ring struct pointer
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;
	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		msleep(20);
		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			msleep(20);
			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

			wait_cnt++;
		}

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}
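
/* Reset handshake, as expressed by the register accesses above: the ring is
 * only reset once its TX FBD counter drains to zero; RCB_RING_T0_BE_RST
 * requests the reset and RCB_RING_COULD_BE_RST is polled (toggling the
 * request up to RCB_RESET_WAIT_TIMES times) until the hardware reports the
 * ring may safely be reset, with the whole sequence retried up to
 * RCB_RESET_TRY_TIMES times.
 */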
/**
 *hns_rcb_int_ctrl_hw - rcb irq enable control
 *@q: hnae queue struct pointer
 *@flag:ring flag tx or rx
 *@mask:mask
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
			       int_mask_en);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
			       int_mask_en);
	}
}
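
/**
 *hns_rcb_int_clr_hw - clear rcb interrupt status by writing 1 to the
 * tx/rx ring and overtime interrupt status registers
 *@q: hnae queue struct pointer
 *@flag:ring flag tx or rx
 */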
void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
	}
}
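
/* The RCBv2 helpers below only touch the waterline interrupt mask and the
 * per-ring status registers; unlike the v1 paths above they leave the
 * overtime registers alone, since on v2 hardware the coalesce timeout is
 * programmed per port instead (see hns_rcb_set_port_timeout()).
 */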
void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}

void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}
/**
 *hns_rcb_ring_enable_hw - enable ring
 *@q: rcb ring
 *@val: zero disables BD prefetch, non-zero enables it
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}

/**
 *hns_rcb_common_init_commit_hw - make rcb common init completed
 *@rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb();	/* Sync point before breakpoint */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb();	/* Sync point after breakpoint */
}
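
/* A likely call sequence, inferred from the two helpers above rather than
 * stated in this file: the caller programs the RCB common block first via
 * hns_rcb_common_init_hw() and only then signals completion through
 * RCB_COM_CFG_SYS_FSH_REG by calling hns_rcb_common_init_commit_hw().
 */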
/**
 *hns_rcb_ring_init - init rcb ring
 *@ring_pair: ring pair control block
 *@ring_type: ring type, RX_RING or TX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
	u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	}
}
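
/* Note on the base address writes above: the descriptor DMA address is split
 * across an L and an H register. Writing the high half as (dma >> 31) >> 1
 * rather than dma >> 32 keeps the shift well defined even on configurations
 * where dma_addr_t is only 32 bits wide.
 */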
/**
 *hns_rcb_init_hw - init rcb hardware
 *@ring: rcb ring pair, both RX and TX directions are initialized
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}

/**
 *hns_rcb_set_port_desc_cnt - set rcb port description num
 *@rcb_common: rcb_common device
 *@port_idx:port index
 *@desc_cnt:BD num
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
				      u32 port_idx, u32 desc_cnt)
{
	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
		       desc_cnt);
}

static void hns_rcb_set_port_timeout(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
			       timeout * HNS_RCB_CLK_FREQ_MHZ);
	else
		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
}

static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return HNS_RCB_SERVICE_NW_ENGINE_NUM;
	else
		return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}
/* clear and enable/mask the rcb comm exception irqs */
static void hns_rcb_comm_exc_irq_en(
			struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_vlue = 0xfffffffful;
	u32 msk_vlue = en ? 0 : 0xfffffffful;

	/* clear pending interrupt status */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);

	/* enable or mask the exception interrupts */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);

	/* tx BDs do not need the cacheline, so mask sf_txring_fbd_intmask (bit 1) */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
}
/**
 *hns_rcb_common_init_hw - init rcb common hardware
 *@rcb_common: rcb_common device
 *return 0 - success, negative - fail
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
	u32 reg_val;
	int i;
	int port_num = hns_rcb_common_get_port_num(rcb_common);

	hns_rcb_comm_exc_irq_en(rcb_common, 0);

	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
	if (0x1 != (reg_val & 0x1)) {
		dev_err(rcb_common->dsaf_dev->dev,
			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
		return -EBUSY;
	}

	for (i = 0; i < port_num; i++) {
		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
		(void)hns_rcb_set_coalesced_frames(
			rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
		hns_rcb_set_port_timeout(
			rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
	}

	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
		       HNS_RCB_COMMON_ENDIAN);

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
	} else {
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FNA_B, false);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FA_B, true);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
				 RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
	}

	return 0;
}
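
/**
 *hns_rcb_buf_size2type - convert an rx buffer size in bytes to the matching
 * HNS_BD_SIZE_*_TYPE value; unsupported sizes yield -EINVAL
 *@buf_size: buffer size in bytes
 */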
int hns_rcb_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = -EINVAL;
	}

	return bd_size_type;
}
static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u32 buf_size;
	u16 desc_num, mdnum_ppkt;
	bool irq_idx, is_ver1;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	} else {
		ring = &q->tx_ring;
		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
				 HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
	}

	rcb_common = ring_pair_cb->rcb_common;
	buf_size = rcb_common->dsaf_dev->buf_size;
	desc_num = rcb_common->dsaf_dev->desc_num;

	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = buf_size;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = mdnum_ppkt;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}
static int hns_rcb_get_port_in_comm(
	struct rcb_common_cb *rcb_common, int ring_idx)
{
	return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
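
/* Worked example of the mapping above (values chosen for illustration only):
 * with max_vfn = 1 and max_q_per_vf = 16 each network engine owns 16
 * consecutive ring pairs, so ring_idx 0..15 belong to port 0, 16..31 to
 * port 1, and so on; with max_vfn = 64 and max_q_per_vf = 1, rings 0..63 all
 * map to port 0.
 */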
#define SERVICE_RING_IRQ_IDX(v1) \
	((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return SERVICE_RING_IRQ_IDX(is_ver1);
	else
		return HNS_DEBUG_RING_IRQ_IDX;
}

#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
/**
 *hns_rcb_get_cfg - get rcb config
 *@rcb_common: rcb common device
 */
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
	struct platform_device *pdev =
		to_platform_device(rcb_common->dsaf_dev->dev);
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_comm =
			hns_rcb_get_port_in_comm(rcb_common, i);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
			is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
			  platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
			is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) :
			  platform_get_irq(pdev, base_irq_idx + i * 3);
		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}
}
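
/* As the virq lookups above show, v1 hardware exposes two interrupts per ring
 * pair in the device tree (tx first, then rx), while v2 uses a stride of
 * three platform IRQs per ring pair with rx at offset 0 and tx at offset 1;
 * the third vector of each v2 pair is not consumed here.
 */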
/**
 *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *
 *Returns: coalesced_frames
 */
u32 hns_rcb_get_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}

/**
 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *
 *Returns: time_out
 */
u32 hns_rcb_get_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
		       HNS_RCB_CLK_FREQ_MHZ;
	else
		return dsaf_read_dev(rcb_common,
				     RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}
/**
 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *@timeout:tx/rx time for coalesced time_out
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);

	if (timeout == old_timeout)
		return 0;

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
			dev_err(rcb_common->dsaf_dev->dev,
				"error: not support coalesce_usecs setting!\n");
			return -EINVAL;
		}
	}

	if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: not support coalesce %dus!\n", timeout);
		return -EINVAL;
	}

	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
	return 0;
}

/**
 *hns_rcb_set_coalesced_frames - set rcb coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *@coalesced_frames:tx/rx BD num for coalesced frames
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames >= rcb_common->desc_num ||
	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
	    coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: not support coalesce_frames setting!\n");
		return -EINVAL;
	}

	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
		       coalesced_frames);
	return 0;
}
/**
 *hns_rcb_get_queue_mode - get max VM number and max ring number per VM
 *                         according to dsaf mode
 *@dsaf_mode: dsaf mode
 *@max_vfn : max vfn number
 *@max_q_per_vf:max ring number per vm
 */
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
			    u16 *max_q_per_vf)
{
	switch (dsaf_mode) {
	case DSAF_MODE_DISABLE_6PORT_0VM:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	case DSAF_MODE_DISABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		*max_vfn = 1;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_2PORT_64VM:
		*max_vfn = 64;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_6PORT_16VM:
		*max_vfn = 16;
		*max_q_per_vf = 1;
		break;
	default:
		*max_vfn = 1;
		*max_q_per_vf = 16;
	}
}

int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
	switch (dsaf_dev->dsaf_mode) {
	case DSAF_MODE_ENABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		return 1;

	case DSAF_MODE_DISABLE_FIX:
		return 6;

	case DSAF_MODE_ENABLE_0VM:
		return 32;

	case DSAF_MODE_DISABLE_6PORT_0VM:
	case DSAF_MODE_ENABLE_16VM:
	case DSAF_MODE_DISABLE_6PORT_2VM:
	case DSAF_MODE_DISABLE_6PORT_16VM:
	case DSAF_MODE_DISABLE_6PORT_4VM:
	case DSAF_MODE_ENABLE_8VM:
		return 96;

	case DSAF_MODE_DISABLE_2PORT_16VM:
	case DSAF_MODE_DISABLE_2PORT_8VM:
	case DSAF_MODE_ENABLE_32VM:
	case DSAF_MODE_DISABLE_2PORT_64VM:
	case DSAF_MODE_ENABLE_128VM:
		return 128;

	default:
		dev_warn(dsaf_dev->dev,
			 "get ring num fail,use default!dsaf_mode=%d\n",
			 dsaf_dev->dsaf_mode);
		return 128;
	}
}
void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}

static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
			   int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev);

	rcb_common =
		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
			ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
	if (!rcb_common) {
		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
		return -ENOMEM;
	}
	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;

	hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
	rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);

	dsaf_dev->rcb_common[comm_index] = rcb_common;

	return 0;
}
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}
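
/**
 *hns_rcb_update_stats - fold the hardware packet counters of one rcb queue
 * and of its ppe channel into the ring pair's hw_stats
 *@queue: rcb ring queue
 */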
void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	hw_stats->rx_pkts += dsaf_read_dev(queue,
			RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}
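
/* The PKTNUM_RECORD registers are cleared (written with 0x1) right after
 * being read above, so those hardware counters are read-and-reset; callers
 * that want cumulative numbers are expected to go through
 * hns_rcb_update_stats() before reading hw_stats back out via
 * hns_rcb_get_stats() below.
 */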
/**
 *hns_rcb_get_stats - get rcb statistic
 *@queue: rcb ring queue
 *@data:statistic value
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
	u64 *regs_buff = data;
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	regs_buff[0] = hw_stats->tx_pkts;
	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
	regs_buff[3] =
		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
	regs_buff[10] = queue->tx_ring.stats.restart_queue;
	regs_buff[11] = queue->tx_ring.stats.tx_busy;

	regs_buff[12] = hw_stats->rx_pkts;
	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
	regs_buff[15] =
		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
	regs_buff[26] = queue->rx_ring.stats.l2_err;
	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}
/**
 *hns_rcb_get_ring_sset_count - rcb string set count
 *@stringset:ethtool cmd
 *return rcb ring string set count
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS)
		return HNS_RING_STATIC_REG_NUM;

	return 0;
}

/**
 *hns_rcb_get_common_regs_count - rcb common regs count
 *return regs count
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}

/**
 *hns_rcb_get_ring_regs_count - rcb ring regs count
 *return regs count
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}
/**
 *hns_rcb_get_strings - get rcb string set
 *@stringset:string set index
 *@data:strings name value
 *@index:queue index
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
	char *buff = (char *)data;

	if (stringset != ETH_SS_STATS)
		return;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
}
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
	bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
	u32 reg_tmp;
	u32 reg_num_tmp;
	u32 i = 0;

	/*rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry registers */
	for (i = 0; i < 16; i++) { /* total 16 model registers */
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
	reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
	for (i = 0; i < reg_num_tmp; i++)
		regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);

	regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 78; i < 80; i++)
		regs[i] = 0xcccccccc;
}
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
	u32 *regs = data;
	struct ring_pair_cb *ring_pair
		= container_of(queue, struct ring_pair_cb, q);
	u32 i = 0;

	/*rcb ring registers */
	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

	/* mark end of ring regs */
	for (i = 35; i < 40; i++)
		regs[i] = 0xcccccc00 + ring_pair->index;
}