/* Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "nicvf_queues.h"
static void nicvf_get_page(struct nicvf *nic)
{
	if (!nic->rb_pageref || !nic->rb_page)
		return;

	page_ref_add(nic->rb_page, nic->rb_pageref);
	nic->rb_pageref = 0;
}
/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}
/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}
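/* Example of the alignment math above (illustrative values): with
 * align_bytes = 128 and a DMA address of 0x1040, NICVF_ALIGNED_ADDR()
 * rounds up to phys_base = 0x1080; the same 0x40 offset is then added
 * to unalign_base so that 'base' (the CPU view) and 'phys_base' (the
 * device view) refer to the same aligned descriptor area.
 */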
/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}
/* Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page &&
	    ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
		nic->rb_pageref++;
		goto ret;
	}

	nicvf_get_page(nic);
	nic->rb_page = NULL;

	/* Allocate a new page */
	nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
				   order);
	if (!nic->rb_page) {
		nic->drv_stats.rcv_buffer_alloc_failures++;
		return -ENOMEM;
	}
	nic->rb_page_offset = 0;
ret:
	*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
	nic->rb_page_offset += buf_len;

	return 0;
}
/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	return skb;
}
/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}

	nicvf_get_page(nic);

	return 0;
}
/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release page references held by the ring's buffers */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		put_page(virt_to_page(phys_to_virt(buf_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Free SKB of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	put_page(virt_to_page(phys_to_virt(buf_addr)));

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
/* Refill receive buffer descriptors with new buffers. */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get no of desc's to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	nicvf_get_page(nic);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation is success */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}
/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}
/* In Softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}
/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}
static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq || !cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}
/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}
static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	if (!sq || !sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}
static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}
static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}
static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before disabling it.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR and wait for it to become idle */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;

	/* Wait until both halves of the prefetch status register match */
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}

	/* Put the RBDR into reset and then bring it back out */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}
void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable first VLAN stripping */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}
/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);

	nicvf_config_vlan_stripping(nic, nic->netdev->features);

	/* Enable Receive queue */
	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
	rq_cfg.ena = 1;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}
/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
	cq_cfg.ena = 1;
	cq_cfg.qsize = CMP_QSIZE;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}
/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
	sq_cfg.ena = 1;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}
/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
	rbdr_cfg.ena = 1;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}
/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}
static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}
int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = DEFAULT_RBDR_CNT;
	qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
	qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
	qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return 0;
}
int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return 0;
}
/* Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}
/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}
static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}
void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}
void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}
void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}
/* Calculate no of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;  /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}
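/* Worked example for the count above (illustrative): if the whole TSO
 * payload sits in the linear area, each segment needs one header edesc
 * plus one payload edesc, so gso_segs = 3 gives num_edescs = 6; adding
 * one SQ_HDR_SUBDESC per segment yields 6 + 3 = 9 subdescriptors.
 */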
/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}
/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->skbuff[qentry] = (u64)skb;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* No of subdescriptors following this */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}

	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
		hdr->tso = 1;
		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
		/* For non-tunneled pkts, point this to L2 ethertype */
		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
		nic->drv_stats.tx_tso++;
	}
}
/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}
/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));

			seg_subdescs++;
			seg_len += size;
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
	nic->drv_stats.tx_tso++;
	return 1;
}
/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt;
	int sq_num, qentry;
	struct queue_set *qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
		/* Get secondary Qset's SQ structure */
		i = sq_num / MAX_SND_QUEUES_PER_QS;
		if (!nic->snicvf[i - 1]) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    i - 1);
			return 1;
		}
		nic = (struct nicvf *)nic->snicvf[i - 1];
		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
	}

	qs = nic->qs;
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
				 skb, skb->len);

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, subdesc_cnt);
	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}
static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}
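/* frag_num() swizzles the index within each 64-bit word: indices 0,1,2,3
 * map to 3,2,1,0 and 4,5,6,7 map to 7,6,5,4. The rb_lens array is a set
 * of u16 slots packed into 64-bit CQE words, so on big-endian kernels
 * the halfword order within each word is reversed relative to the
 * hardware layout; on little-endian the index is used unchanged.
 */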
/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_frag = NULL;
	struct sk_buff *prev_frag = NULL;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	/* Except on 88xx pass1, CQE_RX2_S is added to CQE_RX at word6 on
	 * all other chips, hence the buffer pointers move by one word.
	 *
	 * Use the existing 'hw_tso' flag, which is set for all chips except
	 * 88xx pass1, instead of an additional cache line access (or miss)
	 * by using the pci dev's revision.
	 */
	if (!nic->hw_tso)
		rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
	else
		rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
						       payload_len);
			if (!skb_frag) {
				dev_kfree_skb(skb);
				return NULL;
			}

			if (!skb_shinfo(skb)->frag_list)
				skb_shinfo(skb)->frag_list = skb_frag;
			else
				prev_frag->next = skb_frag;

			prev_frag = skb_frag;
			skb->len += payload_len;
			skb->data_len += payload_len;
			skb_frag->len = payload_len;
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}
static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
	u64 reg_val;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		reg_val = 0;
	}

	return reg_val;
}
/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}
/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}
/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_INT, mask);
}
/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
	/* If interrupt type is unknown, we treat it disabled. */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}

	return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}
/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
		return 0;

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return 1;
}
/* Check for errors in the send cmp.queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return 1;
}
++;