Commit | Line | Data |
---|---|---|
511e6bc0 | 1 | /* |
2 | * Copyright (c) 2014-2015 Hisilicon Limited. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | */ | |
9 | ||
10 | #include <linux/cdev.h> | |
11 | #include <linux/module.h> | |
12 | #include <linux/kernel.h> | |
13 | #include <linux/init.h> | |
14 | #include <linux/netdevice.h> | |
15 | #include <linux/etherdevice.h> | |
16 | #include <asm/cacheflush.h> | |
17 | #include <linux/platform_device.h> | |
18 | #include <linux/of.h> | |
19 | #include <linux/of_address.h> | |
20 | #include <linux/of_platform.h> | |
21 | #include <linux/of_irq.h> | |
22 | #include <linux/spinlock.h> | |
23 | ||
24 | #include "hns_dsaf_main.h" | |
25 | #include "hns_dsaf_ppe.h" | |
26 | #include "hns_dsaf_rcb.h" | |
27 | ||
28 | #define RCB_COMMON_REG_OFFSET 0x80000 | |
29 | #define TX_RING 0 | |
30 | #define RX_RING 1 | |
31 | ||
32 | #define RCB_RESET_WAIT_TIMES 30 | |
33 | #define RCB_RESET_TRY_TIMES 10 | |
34 | ||
35 | /** | |
36 | *hns_rcb_wait_fbd_clean - clean fbd | |
37 | *@qs: ring struct pointer array | |
38 | *@qnum: num of array | |
39 | *@flag: tx or rx flag | |
40 | */ | |
41 | void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag) | |
42 | { | |
43 | int i, wait_cnt; | |
44 | u32 fbd_num; | |
45 | ||
46 | for (wait_cnt = i = 0; i < q_num; wait_cnt++) { | |
47 | usleep_range(200, 300); | |
48 | fbd_num = 0; | |
49 | if (flag & RCB_INT_FLAG_TX) | |
50 | fbd_num += dsaf_read_dev(qs[i], | |
51 | RCB_RING_TX_RING_FBDNUM_REG); | |
52 | if (flag & RCB_INT_FLAG_RX) | |
53 | fbd_num += dsaf_read_dev(qs[i], | |
54 | RCB_RING_RX_RING_FBDNUM_REG); | |
55 | if (!fbd_num) | |
56 | i++; | |
57 | if (wait_cnt >= 10000) | |
58 | break; | |
59 | } | |
60 | ||
61 | if (i < q_num) | |
62 | dev_err(qs[i]->handle->owner_dev, | |
63 | "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); | |
64 | } | |
65 | ||
66 | /** | |
67 | *hns_rcb_reset_ring_hw - ring reset | |
68 | *@q: ring struct pointer | |
69 | */ | |
70 | void hns_rcb_reset_ring_hw(struct hnae_queue *q) | |
71 | { | |
72 | u32 wait_cnt; | |
73 | u32 try_cnt = 0; | |
74 | u32 could_ret; | |
75 | ||
76 | u32 tx_fbd_num; | |
77 | ||
78 | while (try_cnt++ < RCB_RESET_TRY_TIMES) { | |
79 | usleep_range(100, 200); | |
80 | tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG); | |
81 | if (tx_fbd_num) | |
82 | continue; | |
83 | ||
84 | dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0); | |
85 | ||
86 | dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1); | |
87 | ||
88 | msleep(20); | |
89 | could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST); | |
90 | ||
91 | wait_cnt = 0; | |
92 | while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) { | |
93 | dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0); | |
94 | ||
95 | dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1); | |
96 | ||
97 | msleep(20); | |
98 | could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST); | |
99 | ||
100 | wait_cnt++; | |
101 | } | |
102 | ||
103 | dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0); | |
104 | ||
105 | if (could_ret) | |
106 | break; | |
107 | } | |
108 | ||
109 | if (try_cnt >= RCB_RESET_TRY_TIMES) | |
110 | dev_err(q->dev->dev, "port%d reset ring fail\n", | |
111 | hns_ae_get_vf_cb(q->handle)->port_index); | |
112 | } | |
113 | ||
114 | /** | |
115 | *hns_rcb_int_ctrl_hw - rcb irq enable control | |
116 | *@q: hnae queue struct pointer | |
117 | *@flag:ring flag tx or rx | |
118 | *@mask:mask | |
119 | */ | |
120 | void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask) | |
121 | { | |
122 | u32 int_mask_en = !!mask; | |
123 | ||
124 | if (flag & RCB_INT_FLAG_TX) { | |
125 | dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en); | |
126 | dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG, | |
127 | int_mask_en); | |
128 | } | |
129 | ||
130 | if (flag & RCB_INT_FLAG_RX) { | |
131 | dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en); | |
132 | dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG, | |
133 | int_mask_en); | |
134 | } | |
135 | } | |
136 | ||
/* Clear a ring pair's pending interrupt status bits (write 1 to clear)
 * for the directions selected by @flag.
 */
void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
	}
}
149 | ||
/* V2 hardware variant of hns_rcb_int_ctrl_hw: only the watermark-level
 * interrupt masks exist; there are no separate overtime mask registers.
 */
void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}
160 | ||
/* V2 hardware variant of hns_rcb_int_clr_hw: clear pending ring interrupt
 * status (write 1 to clear) via the V2 status registers.
 */
void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}
169 | ||
511e6bc0 | 170 | /** |
171 | *hns_rcb_ring_enable_hw - enable ring | |
172 | *@ring: rcb ring | |
173 | */ | |
174 | void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val) | |
175 | { | |
176 | dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val); | |
177 | } | |
178 | ||
179 | void hns_rcb_start(struct hnae_queue *q, u32 val) | |
180 | { | |
181 | hns_rcb_ring_enable_hw(q, val); | |
182 | } | |
183 | ||
/**
 *hns_rcb_common_init_commit_hw - tell hardware that RCB common init is done
 *@rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb(); /* ensure all prior config writes are posted first */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb(); /* ensure the finish flag is posted before continuing */
}
194 | ||
/**
 *hns_rcb_ring_init - program one ring (TX or RX) of a ring pair into HW
 *@ring_pair: ring pair control block
 *@ring_type: ring type, RX_RING or TX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
	u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		/* (dma >> 31) >> 1 is dma >> 32 without UB when
		 * dma_addr_t is only 32 bits wide
		 */
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
			       bd_size_type);
		/* NOTE(review): BD_NUM is programmed with port_id_in_dsa,
		 * mirroring PKTLINE below - confirm against the RCB spec
		 */
		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_dsa);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_dsa);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_dsa);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_dsa);
	}
}
235 | ||
236 | /** | |
237 | *hns_rcb_init_hw - init rcb hardware | |
238 | *@ring: rcb ring | |
239 | */ | |
240 | void hns_rcb_init_hw(struct ring_pair_cb *ring) | |
241 | { | |
242 | hns_rcb_ring_init(ring, RX_RING); | |
243 | hns_rcb_ring_init(ring, TX_RING); | |
244 | } | |
245 | ||
246 | /** | |
247 | *hns_rcb_set_port_desc_cnt - set rcb port description num | |
248 | *@rcb_common: rcb_common device | |
249 | *@port_idx:port index | |
250 | *@desc_cnt:BD num | |
251 | */ | |
252 | static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common, | |
253 | u32 port_idx, u32 desc_cnt) | |
254 | { | |
511e6bc0 | 255 | dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4, |
256 | desc_cnt); | |
257 | } | |
258 | ||
259 | /** | |
260 | *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames | |
261 | *@rcb_common: rcb_common device | |
262 | *@port_idx:port index | |
263 | *@coalesced_frames:BD num for coalesced frames | |
264 | */ | |
265 | static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common, | |
266 | u32 port_idx, | |
267 | u32 coalesced_frames) | |
268 | { | |
511e6bc0 | 269 | if (coalesced_frames >= rcb_common->desc_num || |
270 | coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES) | |
271 | return -EINVAL; | |
272 | ||
273 | dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4, | |
274 | coalesced_frames); | |
275 | return 0; | |
276 | } | |
277 | ||
278 | /** | |
279 | *hns_rcb_get_port_coalesced_frames - set rcb port coalesced frames | |
280 | *@rcb_common: rcb_common device | |
281 | *@port_idx:port index | |
282 | * return coaleseced frames value | |
283 | */ | |
284 | static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common, | |
285 | u32 port_idx) | |
286 | { | |
287 | if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) | |
288 | port_idx = 0; | |
289 | ||
290 | return dsaf_read_dev(rcb_common, | |
291 | RCB_CFG_PKTLINE_REG + port_idx * 4); | |
292 | } | |
293 | ||
294 | /** | |
295 | *hns_rcb_set_timeout - set rcb port coalesced time_out | |
296 | *@rcb_common: rcb_common device | |
297 | *@time_out:time for coalesced time_out | |
298 | */ | |
299 | static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common, | |
300 | u32 timeout) | |
301 | { | |
302 | dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout); | |
303 | } | |
304 | ||
305 | static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common) | |
306 | { | |
307 | if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) | |
308 | return HNS_RCB_SERVICE_NW_ENGINE_NUM; | |
309 | else | |
310 | return HNS_RCB_DEBUG_NW_ENGINE_NUM; | |
311 | } | |
312 | ||
/* Clear all pending RCB common exception interrupts, then (un)mask them.
 * @en: non-zero writes 0 to the masks, zero writes all-ones; presumably
 * 0 = unmasked/enabled - confirm against the RCB register spec.
 */
static void hns_rcb_comm_exc_irq_en(
			struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_vlue = 0xfffffffful;
	u32 msk_vlue = en ? 0 : 0xfffffffful;

	/* clear every pending exception status bit first */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);

	/* then program the interrupt masks */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);

	/* TX BDs need no cacheline handling, so sf_txring_fbd_intmask
	 * (bit 1) is always kept masked
	 */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
}
341 | ||
342 | /** | |
343 | *hns_rcb_common_init_hw - init rcb common hardware | |
344 | *@rcb_common: rcb_common device | |
345 | *retuen 0 - success , negative --fail | |
346 | */ | |
347 | int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common) | |
348 | { | |
349 | u32 reg_val; | |
350 | int i; | |
351 | int port_num = hns_rcb_common_get_port_num(rcb_common); | |
352 | ||
353 | hns_rcb_comm_exc_irq_en(rcb_common, 0); | |
354 | ||
355 | reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG); | |
356 | if (0x1 != (reg_val & 0x1)) { | |
357 | dev_err(rcb_common->dsaf_dev->dev, | |
358 | "RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val); | |
359 | return -EBUSY; | |
360 | } | |
361 | ||
362 | for (i = 0; i < port_num; i++) { | |
363 | hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num); | |
364 | (void)hns_rcb_set_port_coalesced_frames( | |
365 | rcb_common, i, rcb_common->coalesced_frames); | |
366 | } | |
367 | hns_rcb_set_timeout(rcb_common, rcb_common->timeout); | |
368 | ||
369 | dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG, | |
370 | HNS_RCB_COMMON_ENDIAN); | |
371 | ||
13ac695e S |
372 | dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0); |
373 | dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1); | |
374 | ||
511e6bc0 | 375 | return 0; |
376 | } | |
377 | ||
378 | int hns_rcb_buf_size2type(u32 buf_size) | |
379 | { | |
380 | int bd_size_type; | |
381 | ||
382 | switch (buf_size) { | |
383 | case 512: | |
384 | bd_size_type = HNS_BD_SIZE_512_TYPE; | |
385 | break; | |
386 | case 1024: | |
387 | bd_size_type = HNS_BD_SIZE_1024_TYPE; | |
388 | break; | |
389 | case 2048: | |
390 | bd_size_type = HNS_BD_SIZE_2048_TYPE; | |
391 | break; | |
392 | case 4096: | |
393 | bd_size_type = HNS_BD_SIZE_4096_TYPE; | |
394 | break; | |
395 | default: | |
396 | bd_size_type = -EINVAL; | |
397 | } | |
398 | ||
399 | return bd_size_type; | |
400 | } | |
401 | ||
/* Fill in the software state of one ring (TX or RX) of a ring pair:
 * register base, IRQ number and buffer/descriptor geometry.  Descriptor
 * memory itself is not allocated here (desc/desc_cb stay NULL).
 */
static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u32 buf_size;
	u16 desc_num, mdnum_ppkt;
	bool irq_idx, is_ver1;	/* irq_idx holds an HNS_RCB_IRQ_IDX_* value */

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	} else {
		ring = &q->tx_ring;
		/* TX ring registers sit at a fixed offset from the pair base */
		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
		/* V2 hardware allows a different TX BD-per-packet limit */
		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
				 HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
	}

	rcb_common = ring_pair_cb->rcb_common;
	buf_size = rcb_common->dsaf_dev->buf_size;
	desc_num = rcb_common->dsaf_dev->desc_num;

	/* no descriptor memory yet - allocated later */
	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = buf_size;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = mdnum_ppkt;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}
445 | ||
446 | static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb) | |
447 | { | |
448 | ring_pair_cb->q.handle = NULL; | |
449 | ||
450 | hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING); | |
451 | hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING); | |
452 | } | |
453 | ||
454 | static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx) | |
455 | { | |
456 | int comm_index = rcb_common->comm_index; | |
457 | int port; | |
458 | int q_num; | |
459 | ||
460 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { | |
461 | q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn; | |
462 | port = ring_idx / q_num; | |
463 | } else { | |
464 | port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1; | |
465 | } | |
466 | ||
467 | return port; | |
468 | } | |
469 | ||
13ac695e S |
470 | #define SERVICE_RING_IRQ_IDX(v1) \ |
471 | ((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX) | |
472 | #define DEBUG_RING_IRQ_IDX(v1) \ | |
473 | ((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX) | |
474 | #define DEBUG_RING_IRQ_OFFSET(v1) \ | |
475 | ((v1) ? HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET) | |
511e6bc0 | 476 | static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common) |
477 | { | |
478 | int comm_index = rcb_common->comm_index; | |
13ac695e | 479 | bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); |
511e6bc0 | 480 | |
481 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) | |
13ac695e | 482 | return SERVICE_RING_IRQ_IDX(is_ver1); |
511e6bc0 | 483 | else |
13ac695e S |
484 | return DEBUG_RING_IRQ_IDX(is_ver1) + |
485 | (comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1); | |
511e6bc0 | 486 | } |
487 | ||
488 | #define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\ | |
489 | ((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid)) | |
490 | /** | |
491 | *hns_rcb_get_cfg - get rcb config | |
492 | *@rcb_common: rcb common device | |
493 | */ | |
494 | void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) | |
495 | { | |
496 | struct ring_pair_cb *ring_pair_cb; | |
497 | u32 i; | |
498 | u32 ring_num = rcb_common->ring_num; | |
499 | int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common); | |
500 | struct device_node *np = rcb_common->dsaf_dev->dev->of_node; | |
13ac695e S |
501 | struct platform_device *pdev = |
502 | container_of(rcb_common->dsaf_dev->dev, | |
503 | struct platform_device, dev); | |
504 | bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); | |
511e6bc0 | 505 | |
506 | for (i = 0; i < ring_num; i++) { | |
507 | ring_pair_cb = &rcb_common->ring_pair_cb[i]; | |
508 | ring_pair_cb->rcb_common = rcb_common; | |
509 | ring_pair_cb->dev = rcb_common->dsaf_dev->dev; | |
510 | ring_pair_cb->index = i; | |
511 | ring_pair_cb->q.io_base = | |
512 | RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i); | |
513 | ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i); | |
13ac695e S |
514 | ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] = |
515 | is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) : | |
516 | platform_get_irq(pdev, base_irq_idx + i * 3 + 1); | |
517 | ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] = | |
518 | is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) : | |
519 | platform_get_irq(pdev, base_irq_idx + i * 3); | |
511e6bc0 | 520 | ring_pair_cb->q.phy_base = |
521 | RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i); | |
522 | hns_rcb_ring_pair_get_cfg(ring_pair_cb); | |
523 | } | |
524 | } | |
525 | ||
526 | /** | |
527 | *hns_rcb_get_coalesced_frames - get rcb port coalesced frames | |
528 | *@rcb_common: rcb_common device | |
529 | *@comm_index:port index | |
530 | *return coalesced_frames | |
531 | */ | |
532 | u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port) | |
533 | { | |
534 | int comm_index = hns_dsaf_get_comm_idx_by_port(port); | |
535 | struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index]; | |
536 | ||
537 | return hns_rcb_get_port_coalesced_frames(rcb_comm, port); | |
538 | } | |
539 | ||
540 | /** | |
541 | *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out | |
542 | *@rcb_common: rcb_common device | |
543 | *@comm_index:port index | |
544 | *return time_out | |
545 | */ | |
546 | u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index) | |
547 | { | |
548 | struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index]; | |
549 | ||
550 | return rcb_comm->timeout; | |
551 | } | |
552 | ||
553 | /** | |
554 | *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out | |
555 | *@rcb_common: rcb_common device | |
556 | *@comm_index: comm :index | |
557 | *@etx_usecs:tx time for coalesced time_out | |
558 | *@rx_usecs:rx time for coalesced time_out | |
559 | */ | |
560 | void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev, | |
561 | int port, u32 timeout) | |
562 | { | |
563 | int comm_index = hns_dsaf_get_comm_idx_by_port(port); | |
564 | struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index]; | |
565 | ||
566 | if (rcb_comm->timeout == timeout) | |
567 | return; | |
568 | ||
569 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { | |
570 | dev_err(dsaf_dev->dev, | |
571 | "error: not support coalesce_usecs setting!\n"); | |
572 | return; | |
573 | } | |
574 | rcb_comm->timeout = timeout; | |
575 | hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout); | |
576 | } | |
577 | ||
578 | /** | |
579 | *hns_rcb_set_coalesced_frames - set rcb coalesced frames | |
580 | *@rcb_common: rcb_common device | |
581 | *@tx_frames:tx BD num for coalesced frames | |
582 | *@rx_frames:rx BD num for coalesced frames | |
583 | *Return 0 on success, negative on failure | |
584 | */ | |
585 | int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev, | |
586 | int port, u32 coalesced_frames) | |
587 | { | |
588 | int comm_index = hns_dsaf_get_comm_idx_by_port(port); | |
589 | struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index]; | |
590 | u32 coalesced_reg_val; | |
591 | int ret; | |
592 | ||
593 | coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port); | |
594 | ||
595 | if (coalesced_reg_val == coalesced_frames) | |
596 | return 0; | |
597 | ||
598 | if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) { | |
599 | ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port, | |
600 | coalesced_frames); | |
601 | return ret; | |
602 | } else { | |
603 | return -EINVAL; | |
604 | } | |
605 | } | |
606 | ||
607 | /** | |
608 | *hns_rcb_get_queue_mode - get max VM number and max ring number per VM | |
609 | * accordding to dsaf mode | |
610 | *@dsaf_mode: dsaf mode | |
611 | *@max_vfn : max vfn number | |
612 | *@max_q_per_vf:max ring number per vm | |
613 | */ | |
4568637f | 614 | void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index, |
615 | u16 *max_vfn, u16 *max_q_per_vf) | |
511e6bc0 | 616 | { |
617 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { | |
618 | switch (dsaf_mode) { | |
619 | case DSAF_MODE_DISABLE_6PORT_0VM: | |
620 | *max_vfn = 1; | |
621 | *max_q_per_vf = 16; | |
622 | break; | |
623 | case DSAF_MODE_DISABLE_FIX: | |
624 | *max_vfn = 1; | |
625 | *max_q_per_vf = 1; | |
626 | break; | |
627 | case DSAF_MODE_DISABLE_2PORT_64VM: | |
628 | *max_vfn = 64; | |
629 | *max_q_per_vf = 1; | |
630 | break; | |
631 | case DSAF_MODE_DISABLE_6PORT_16VM: | |
632 | *max_vfn = 16; | |
633 | *max_q_per_vf = 1; | |
634 | break; | |
635 | default: | |
636 | *max_vfn = 1; | |
637 | *max_q_per_vf = 16; | |
638 | break; | |
639 | } | |
640 | } else { | |
641 | *max_vfn = 1; | |
642 | *max_q_per_vf = 1; | |
643 | } | |
644 | } | |
645 | ||
646 | int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index) | |
647 | { | |
648 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { | |
649 | switch (dsaf_dev->dsaf_mode) { | |
650 | case DSAF_MODE_ENABLE_FIX: | |
651 | return 1; | |
652 | ||
653 | case DSAF_MODE_DISABLE_FIX: | |
654 | return 6; | |
655 | ||
656 | case DSAF_MODE_ENABLE_0VM: | |
657 | return 32; | |
658 | ||
659 | case DSAF_MODE_DISABLE_6PORT_0VM: | |
660 | case DSAF_MODE_ENABLE_16VM: | |
661 | case DSAF_MODE_DISABLE_6PORT_2VM: | |
662 | case DSAF_MODE_DISABLE_6PORT_16VM: | |
663 | case DSAF_MODE_DISABLE_6PORT_4VM: | |
664 | case DSAF_MODE_ENABLE_8VM: | |
665 | return 96; | |
666 | ||
667 | case DSAF_MODE_DISABLE_2PORT_16VM: | |
668 | case DSAF_MODE_DISABLE_2PORT_8VM: | |
669 | case DSAF_MODE_ENABLE_32VM: | |
670 | case DSAF_MODE_DISABLE_2PORT_64VM: | |
671 | case DSAF_MODE_ENABLE_128VM: | |
672 | return 128; | |
673 | ||
674 | default: | |
675 | dev_warn(dsaf_dev->dev, | |
676 | "get ring num fail,use default!dsaf_mode=%d\n", | |
677 | dsaf_dev->dsaf_mode); | |
678 | return 128; | |
679 | } | |
680 | } else { | |
681 | return 1; | |
682 | } | |
683 | } | |
684 | ||
685 | void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev, | |
686 | int comm_index) | |
687 | { | |
688 | void __iomem *base_addr; | |
689 | ||
690 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) | |
691 | base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET; | |
692 | else | |
693 | base_addr = dsaf_dev->sds_base | |
694 | + (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET | |
695 | + RCB_COMMON_REG_OFFSET; | |
696 | ||
697 | return base_addr; | |
698 | } | |
699 | ||
700 | static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev, | |
701 | int comm_index) | |
702 | { | |
703 | struct device_node *np = dsaf_dev->dev->of_node; | |
704 | phys_addr_t phy_addr; | |
705 | const __be32 *tmp_addr; | |
706 | u64 addr_offset = 0; | |
707 | u64 size = 0; | |
708 | int index = 0; | |
709 | ||
710 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { | |
711 | index = 2; | |
712 | addr_offset = RCB_COMMON_REG_OFFSET; | |
713 | } else { | |
714 | index = 1; | |
715 | addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET + | |
716 | RCB_COMMON_REG_OFFSET; | |
717 | } | |
718 | tmp_addr = of_get_address(np, index, &size, NULL); | |
719 | phy_addr = of_translate_address(np, tmp_addr); | |
720 | return phy_addr + addr_offset; | |
721 | } | |
722 | ||
723 | int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, | |
724 | int comm_index) | |
725 | { | |
726 | struct rcb_common_cb *rcb_common; | |
727 | enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode; | |
728 | u16 max_vfn; | |
729 | u16 max_q_per_vf; | |
730 | int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index); | |
731 | ||
732 | rcb_common = | |
733 | devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) + | |
734 | ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL); | |
735 | if (!rcb_common) { | |
736 | dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n"); | |
737 | return -ENOMEM; | |
738 | } | |
739 | rcb_common->comm_index = comm_index; | |
740 | rcb_common->ring_num = ring_num; | |
741 | rcb_common->dsaf_dev = dsaf_dev; | |
742 | ||
743 | rcb_common->desc_num = dsaf_dev->desc_num; | |
744 | rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES; | |
745 | rcb_common->timeout = HNS_RCB_MAX_TIME_OUT; | |
746 | ||
747 | hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf); | |
748 | rcb_common->max_vfn = max_vfn; | |
749 | rcb_common->max_q_per_vf = max_q_per_vf; | |
750 | ||
751 | rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index); | |
752 | rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index); | |
753 | ||
754 | dsaf_dev->rcb_common[comm_index] = rcb_common; | |
755 | return 0; | |
756 | } | |
757 | ||
758 | void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, | |
759 | u32 comm_index) | |
760 | { | |
761 | dsaf_dev->rcb_common[comm_index] = NULL; | |
762 | } | |
763 | ||
/* Accumulate hardware packet counters for one queue into its sw stats.
 * A 0x1 is written to each PKTNUM_RECORD register after reading it -
 * NOTE(review): presumably this resets the counter; confirm against the
 * RCB register spec.
 */
void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	hw_stats->rx_pkts += dsaf_read_dev(queue,
			RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	/* PPE per-queue counters use a 4-byte stride per ring index */
	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}
791 | ||
792 | /** | |
793 | *hns_rcb_get_stats - get rcb statistic | |
794 | *@ring: rcb ring | |
795 | *@data:statistic value | |
796 | */ | |
797 | void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data) | |
798 | { | |
799 | u64 *regs_buff = data; | |
800 | struct ring_pair_cb *ring = | |
801 | container_of(queue, struct ring_pair_cb, q); | |
802 | struct hns_ring_hw_stats *hw_stats = &ring->hw_stats; | |
803 | ||
804 | regs_buff[0] = hw_stats->tx_pkts; | |
805 | regs_buff[1] = hw_stats->ppe_tx_ok_pkts; | |
806 | regs_buff[2] = hw_stats->ppe_tx_drop_pkts; | |
807 | regs_buff[3] = | |
808 | dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG); | |
809 | ||
810 | regs_buff[4] = queue->tx_ring.stats.tx_pkts; | |
811 | regs_buff[5] = queue->tx_ring.stats.tx_bytes; | |
812 | regs_buff[6] = queue->tx_ring.stats.tx_err_cnt; | |
813 | regs_buff[7] = queue->tx_ring.stats.io_err_cnt; | |
814 | regs_buff[8] = queue->tx_ring.stats.sw_err_cnt; | |
815 | regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt; | |
816 | regs_buff[10] = queue->tx_ring.stats.restart_queue; | |
817 | regs_buff[11] = queue->tx_ring.stats.tx_busy; | |
818 | ||
819 | regs_buff[12] = hw_stats->rx_pkts; | |
820 | regs_buff[13] = hw_stats->ppe_rx_ok_pkts; | |
821 | regs_buff[14] = hw_stats->ppe_rx_drop_pkts; | |
822 | regs_buff[15] = | |
823 | dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG); | |
824 | ||
825 | regs_buff[16] = queue->rx_ring.stats.rx_pkts; | |
826 | regs_buff[17] = queue->rx_ring.stats.rx_bytes; | |
827 | regs_buff[18] = queue->rx_ring.stats.rx_err_cnt; | |
828 | regs_buff[19] = queue->rx_ring.stats.io_err_cnt; | |
829 | regs_buff[20] = queue->rx_ring.stats.sw_err_cnt; | |
830 | regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt; | |
831 | regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt; | |
832 | regs_buff[23] = queue->rx_ring.stats.err_pkt_len; | |
833 | regs_buff[24] = queue->rx_ring.stats.non_vld_descs; | |
834 | regs_buff[25] = queue->rx_ring.stats.err_bd_num; | |
835 | regs_buff[26] = queue->rx_ring.stats.l2_err; | |
836 | regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err; | |
837 | } | |
838 | ||
839 | /** | |
840 | *hns_rcb_get_ring_sset_count - rcb string set count | |
841 | *@stringset:ethtool cmd | |
842 | *return rcb ring string set count | |
843 | */ | |
844 | int hns_rcb_get_ring_sset_count(int stringset) | |
845 | { | |
846 | if (stringset == ETH_SS_STATS) | |
847 | return HNS_RING_STATIC_REG_NUM; | |
848 | ||
849 | return 0; | |
850 | } | |
851 | ||
852 | /** | |
853 | *hns_rcb_get_common_regs_count - rcb common regs count | |
854 | *return regs count | |
855 | */ | |
856 | int hns_rcb_get_common_regs_count(void) | |
857 | { | |
858 | return HNS_RCB_COMMON_DUMP_REG_NUM; | |
859 | } | |
860 | ||
861 | /** | |
862 | *rcb_get_sset_count - rcb ring regs count | |
863 | *return regs count | |
864 | */ | |
865 | int hns_rcb_get_ring_regs_count(void) | |
866 | { | |
867 | return HNS_RCB_RING_DUMP_REG_NUM; | |
868 | } | |
869 | ||
870 | /** | |
871 | *hns_rcb_get_strings - get rcb string set | |
872 | *@stringset:string set index | |
873 | *@data:strings name value | |
874 | *@index:queue index | |
875 | */ | |
876 | void hns_rcb_get_strings(int stringset, u8 *data, int index) | |
877 | { | |
878 | char *buff = (char *)data; | |
879 | ||
880 | if (stringset != ETH_SS_STATS) | |
881 | return; | |
882 | ||
883 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index); | |
884 | buff = buff + ETH_GSTRING_LEN; | |
885 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index); | |
886 | buff = buff + ETH_GSTRING_LEN; | |
887 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index); | |
888 | buff = buff + ETH_GSTRING_LEN; | |
889 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index); | |
890 | buff = buff + ETH_GSTRING_LEN; | |
891 | ||
892 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index); | |
893 | buff = buff + ETH_GSTRING_LEN; | |
894 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index); | |
895 | buff = buff + ETH_GSTRING_LEN; | |
896 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index); | |
897 | buff = buff + ETH_GSTRING_LEN; | |
898 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index); | |
899 | buff = buff + ETH_GSTRING_LEN; | |
900 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index); | |
901 | buff = buff + ETH_GSTRING_LEN; | |
902 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index); | |
903 | buff = buff + ETH_GSTRING_LEN; | |
904 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index); | |
905 | buff = buff + ETH_GSTRING_LEN; | |
906 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index); | |
907 | buff = buff + ETH_GSTRING_LEN; | |
908 | ||
909 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index); | |
910 | buff = buff + ETH_GSTRING_LEN; | |
911 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index); | |
912 | buff = buff + ETH_GSTRING_LEN; | |
913 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index); | |
914 | buff = buff + ETH_GSTRING_LEN; | |
915 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index); | |
916 | buff = buff + ETH_GSTRING_LEN; | |
917 | ||
918 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index); | |
919 | buff = buff + ETH_GSTRING_LEN; | |
920 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index); | |
921 | buff = buff + ETH_GSTRING_LEN; | |
922 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index); | |
923 | buff = buff + ETH_GSTRING_LEN; | |
924 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index); | |
925 | buff = buff + ETH_GSTRING_LEN; | |
926 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index); | |
927 | buff = buff + ETH_GSTRING_LEN; | |
928 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index); | |
929 | buff = buff + ETH_GSTRING_LEN; | |
930 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index); | |
931 | buff = buff + ETH_GSTRING_LEN; | |
932 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index); | |
933 | buff = buff + ETH_GSTRING_LEN; | |
934 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index); | |
935 | buff = buff + ETH_GSTRING_LEN; | |
936 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index); | |
937 | buff = buff + ETH_GSTRING_LEN; | |
938 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index); | |
939 | buff = buff + ETH_GSTRING_LEN; | |
940 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index); | |
941 | } | |
942 | ||
/**
 *hns_rcb_get_common_regs - dump the RCB common register block
 *@rcb_com: RCB common control block to read registers from
 *@data: destination buffer; must hold at least
 *       HNS_RCB_COMMON_DUMP_REG_NUM u32 entries
 *
 * The index -> register mapping below is a fixed dump layout consumed
 * by ethtool; do not renumber existing entries.
 */
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	u32 i = 0;

	/*rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	/* interrupt mask/status and error-reporting registers */
	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	/* NOTE(review): only ECC error addresses 0 and 3..5 are dumped
	 * here (1 and 2 are skipped) -- presumably intentional layout,
	 * confirm against the register map
	 */
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry registers */
	for (i = 0; i < 16; i++) { /* total 16 model registers */
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG);
	regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 73; i < 80; i++)
		regs[i] = 0xcccccccc;
}
1007 | ||
1008 | void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data) | |
1009 | { | |
1010 | u32 *regs = data; | |
1011 | struct ring_pair_cb *ring_pair | |
1012 | = container_of(queue, struct ring_pair_cb, q); | |
1013 | u32 i = 0; | |
1014 | ||
1015 | /*rcb ring registers */ | |
1016 | regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG); | |
1017 | regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG); | |
1018 | regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG); | |
1019 | regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG); | |
1020 | regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG); | |
1021 | regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG); | |
1022 | regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG); | |
1023 | regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG); | |
1024 | regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG); | |
1025 | ||
1026 | regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG); | |
1027 | regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG); | |
1028 | regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG); | |
1029 | regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG); | |
1030 | regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG); | |
1031 | regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG); | |
1032 | regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG); | |
1033 | regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG); | |
1034 | regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG); | |
1035 | regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG); | |
1036 | ||
1037 | regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG); | |
1038 | regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG); | |
1039 | regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG); | |
1040 | regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG); | |
1041 | regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST); | |
1042 | regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST); | |
1043 | regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG); | |
1044 | ||
1045 | regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG); | |
1046 | regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG); | |
1047 | regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG); | |
1048 | regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG); | |
1049 | regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG); | |
1050 | regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG); | |
1051 | regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG); | |
1052 | regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG); | |
1053 | ||
1054 | /* mark end of ring regs */ | |
1055 | for (i = 35; i < 40; i++) | |
1056 | regs[i] = 0xcccccc00 + ring_pair->index; | |
1057 | } |