/* Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "nicvf_queues.h"
#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
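/* GET_RBUF_INFO() is the inverse of the offset applied in
 * nicvf_alloc_rcv_buffer() below: the address handed to the hardware sits
 * NICVF_RCV_BUF_ALIGN_BYTES past the struct rbuf_info saved at the start of
 * the fragment, so stepping back by the same amount recovers the bookkeeping
 * structure for a received buffer.
 */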
/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}
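/* Example: nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)
 * builds bit_mask = ((1ULL << 2) - 1) << 62 = 0xC000000000000000 and waits
 * until the two RBDR FIFO state bits [63:62] read back as 0x00.
 */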
/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}
/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
}
/* Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	u64 data;
	struct rbuf_info *rinfo;
	int order = get_order(buf_len);

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page) {
		if ((nic->rb_page_offset + buf_len + buf_len) >
		    (PAGE_SIZE << order)) {
			nic->rb_page = NULL;
		} else {
			nic->rb_page_offset += buf_len;
			get_page(nic->rb_page);
		}
	}

	/* Allocate a new page */
	if (!nic->rb_page) {
		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
					   order);
		if (!nic->rb_page) {
			netdev_err(nic->netdev,
				   "Failed to allocate new rcv buffer\n");
			return -ENOMEM;
		}
		nic->rb_page_offset = 0;
	}

	data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;

	/* Align buffer addr to cache line i.e 128 bytes */
	rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
	/* Save page address for reference count update */
	rinfo->page = nic->rb_page;
	/* Store start address for later retrieval */
	rinfo->data = (void *)data;
	/* Store alignment offset */
	rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);

	data += rinfo->offset;

	/* Give next aligned address to hw for DMA */
	*rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
	return 0;
}
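/* Resulting layout within the page, assuming NICVF_RCV_BUF_ALIGN_LEN(addr)
 * returns the padding needed to reach the next 128-byte boundary:
 *
 *   data ... [pad of rinfo->offset bytes] [struct rbuf_info at aligned addr]
 *            [NICVF_RCV_BUF_ALIGN_BYTES]  [*rbuf: buffer given to HW]
 *
 * *rbuf is exactly NICVF_RCV_BUF_ALIGN_BYTES past rinfo, which is what
 * GET_RBUF_INFO() relies on when walking back from a hardware buffer address.
 */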
/* Retrieve actual buffer start address and build skb for received packet */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	struct sk_buff *skb;
	struct rbuf_info *rinfo;

	rb_ptr = (u64)phys_to_virt(rb_ptr);
	/* Get buffer start address and alignment offset */
	rinfo = GET_RBUF_INFO(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(rinfo->data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(rinfo->page);
		return NULL;
	}

	/* Set correct skb->data */
	skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);

	prefetch((void *)rb_ptr);
	return skb;
}
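/* After the skb_reserve() above, skb->data points at
 * rinfo->data + rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES, i.e. the exact
 * address the hardware DMA'ed the packet to (rb_ptr), so the payload starts
 * right at skb->data.
 */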
/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}
	return 0;
}
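/* buf_addr holds the physical buffer address right-shifted by
 * NICVF_RCV_BUF_ALIGN, so only suitably aligned addresses can be posted;
 * nicvf_free_rbdr() below undoes the shift with '<<' when it walks the ring
 * to release the backing pages.
 */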
/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;
	struct rbuf_info *rinfo;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release buffers held by descriptors between head and tail */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
		put_page(rinfo->page);
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Free SKB of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
	put_page(rinfo->page);

	/* Free descriptor ring memory */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
/* Refill receive buffer descriptors with new buffers.
 */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get no of desc's to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);

	/* Doorbell can be ringed with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation is success */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}
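/* qcount read back from STATUS0 is the count of descriptors the hardware
 * still holds; the refill tops the ring back up to rbdr_len - 1 entries.
 * Keeping one slot unused is a common way to distinguish a full ring from an
 * empty one, which is presumably why the doorbell is never rung for the full
 * ring size.
 */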
/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}
/* In Softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}
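/* The tasklet above runs in softirq context and must not sleep, so it refills
 * with GFP_ATOMIC; when that fails it defers to nicvf_rbdr_work(), which runs
 * from a workqueue and can retry with the more reliable GFP_KERNEL.
 */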
/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}
static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	nicvf_free_q_desc_mem(nic, &cq->dmem);
}
/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;

	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}
static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	dma_free_coherent(&nic->pdev->dev,
			  sq->dmem.q_len * TSO_HEADER_SIZE,
			  sq->tso_hdrs, sq->tso_hdrs_phys);

	nicvf_free_q_desc_mem(nic, &sq->dmem);
}
static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}
static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}
static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before relinquishing it.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}
void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable first VLAN stripping */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}
/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);

	nicvf_config_vlan_stripping(nic, nic->netdev->features);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}
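/* Bit layout of mbx.rq.cfg as packed above (field widths inferred only from
 * the shift amounts; the PF side defines the authoritative format):
 *   [26] caching, [25:19] cq_qs, [18:16] cq_idx, [15:9] cont_rbdr_qs,
 *   [8] cont_qs_rbdr_idx, [7:1] start_rbdr_qs, [0] start_qs_rbdr_idx
 */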
/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.qsize = CMP_QSIZE;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, nic->cq_coalesce_usecs);
}
/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}
/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}
/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}
static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    RCV_FRAG_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}
int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	qs->rq_cnt = RCV_QUEUE_CNT;
	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	return 0;
}
int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}
	return 0;
}
/* Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}
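/* The '& (q_len - 1)' wrap in these SQ index helpers assumes the descriptor
 * count is a power of two; e.g. with q_len = 1024, tail 1023 advanced by 2
 * wraps to 1.
 */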
/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}
static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}
void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}
void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}
void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}
/* Calculate no of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;  /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}
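/* Example: a TSO skb with gso_size 1448, gso_segs = 2 and 2896 bytes of
 * payload held in a single page fragment needs, per segment, one edesc for
 * the header plus one for its slice of the fragment, so num_edescs = 4 and
 * the function returns 4 + 2 = 6, the extra gso_segs covering one SQ HEADER
 * subdescriptor per segment.
 */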
/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}
/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->skbuff[qentry] = (u64)skb;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* No of subdescriptors following this */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}
}
/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}
/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      skb_get_queue_mapping(skb), desc_cnt);
	nic->drv_stats.tx_tso++;
	return 1;
}
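/* Each emitted segment therefore consists of one HEADER subdescriptor, one
 * GATHER subdescriptor pointing at the per-segment copy of the headers in
 * sq->tso_hdrs, and one GATHER subdescriptor per piece of payload; only the
 * last segment's HEADER entry keeps a reference to the skb, so the
 * completion path frees it only once.
 */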
/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt;
	int sq_num, qentry;
	struct queue_set *qs = nic->qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if its a TSO packet */
	if (skb_shinfo(skb)->gso_size)
		return nicvf_sq_append_tso(nic, sq, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, subdesc_cnt);
	return 1;

append_fail:
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}
static inline unsigned frag_num(unsigned i)
{
	return (i & ~3) + 3 - (i & 3);
}
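/* frag_num() reverses the index within each aligned group of four
 * (0,1,2,3 -> 3,2,1,0; 4,5,6,7 -> 7,6,5,4). The per-fragment lengths are
 * 16-bit fields packed into 64-bit words of the CQE, so this appears to
 * compensate for how those words are laid out when read on a big-endian CPU.
 */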
/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_frag = NULL;
	struct sk_buff *prev_frag = NULL;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
						       payload_len);
			if (!skb_frag) {
				dev_kfree_skb(skb);
				return NULL;
			}

			if (!skb_shinfo(skb)->frag_list)
				skb_shinfo(skb)->frag_list = skb_frag;
			else
				prev_frag->next = skb_frag;

			prev_frag = skb_frag;
			skb->len += payload_len;
			skb->data_len += payload_len;
			skb_frag->len = payload_len;
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}
/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
}
/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
}
/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
}
/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val;
	u64 mask = 0xff;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		mask = NICVF_INTR_PKT_DROP_MASK;
		break;
	case NICVF_INTR_TCP_TIMER:
		mask = NICVF_INTR_TCP_TIMER_MASK;
		break;
	case NICVF_INTR_MBOX:
		mask = NICVF_INTR_MBOX_MASK;
		break;
	case NICVF_INTR_QS_ERR:
		mask = NICVF_INTR_QS_ERR_MASK;
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		break;
	}

	return (reg_val & mask);
}
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}
/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
		drv_stats->rx_frames_ok++;
		return 0;
	}

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return 1;
}
/* Check for errors in the send cmp.queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;