net: thunderx: Enable CQE count threshold interrupt
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 99a29d0b59181f35df96ffc97637d95c4ce0f23d..d0d1b54900610046955a390f6f5c87ce45cbf1df 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
 #include "q_struct.h"
 #include "nicvf_queues.h"
 
-struct rbuf_info {
-       struct page *page;
-       void    *data;
-       u64     offset;
-};
-
-#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
-
 /* Poll a register for a specific value */
 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
                          u64 reg, int bit_pos, int bits, int val)
@@ -86,8 +78,6 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
                                         u32 buf_len, u64 **rbuf)
 {
-       u64 data;
-       struct rbuf_info *rinfo;
        int order = get_order(buf_len);
 
        /* Check if request can be accommodated in previously allocated page */
@@ -113,46 +103,28 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
                nic->rb_page_offset = 0;
        }
 
-       data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
-
-       /* Align buffer addr to cache line i.e 128 bytes */
-       rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
-       /* Save page address for reference updation */
-       rinfo->page = nic->rb_page;
-       /* Store start address for later retrieval */
-       rinfo->data = (void *)data;
-       /* Store alignment offset */
-       rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
-
-       data += rinfo->offset;
+       *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
 
-       /* Give next aligned address to hw for DMA */
-       *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
        return 0;
 }
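
The hunk above drops the per-buffer rbuf_info header entirely: receive buffers become plain fragments carved sequentially out of a page at nic->rb_page_offset, and the owning page can always be recovered later from the buffer address alone, so no back-pointer metadata is needed. A minimal userspace sketch of the same pattern (names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

static void *cur_page;		/* analogue of nic->rb_page */
static unsigned long offset;	/* analogue of nic->rb_page_offset */

static void *alloc_rcv_buffer(unsigned long buf_len)
{
	/* check if the request fits in the previously allocated page */
	if (!cur_page || offset + buf_len > PAGE_SIZE) {
		cur_page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!cur_page)
			return NULL;
		offset = 0;
	}
	void *buf = (char *)cur_page + offset;
	offset += buf_len;
	return buf;
}

/* analogue of virt_to_page(): page base from any interior address */
static void *buf_to_page(const void *buf)
{
	return (void *)((uintptr_t)buf & ~(PAGE_SIZE - 1));
}

int main(void)
{
	void *a = alloc_rcv_buffer(1536);
	void *b = alloc_rcv_buffer(1536);

	printf("same page: %d\n", buf_to_page(a) == buf_to_page(b));
	return 0;
}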
 
-/* Retrieve actual buffer start address and build skb for received packet */
+/* Build skb around receive buffer */
 static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
                                           u64 rb_ptr, int len)
 {
+       void *data;
        struct sk_buff *skb;
-       struct rbuf_info *rinfo;
 
-       rb_ptr = (u64)phys_to_virt(rb_ptr);
-       /* Get buffer start address and alignment offset */
-       rinfo = GET_RBUF_INFO(rb_ptr);
+       data = phys_to_virt(rb_ptr);
 
        /* Now build an skb to give to stack */
-       skb = build_skb(rinfo->data, RCV_FRAG_LEN);
+       skb = build_skb(data, RCV_FRAG_LEN);
        if (!skb) {
-               put_page(rinfo->page);
+               put_page(virt_to_page(data));
                return NULL;
        }
 
-       /* Set correct skb->data */
-       skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
-
-       prefetch((void *)rb_ptr);
+       prefetch(skb->data);
        return skb;
 }
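
With the metadata gone, build_skb() is handed the fragment start directly, and the failure path recovers the owning page with virt_to_page() to drop the reference that buffer held. The refcount contract is the important part here; a toy model of it:

#include <stdio.h>

struct page_ref { int refcount; };

static void get_page_ref(struct page_ref *p) { p->refcount++; }

static void put_page_ref(struct page_ref *p)
{
	if (--p->refcount == 0)
		printf("page freed\n");
}

int main(void)
{
	struct page_ref page = { .refcount = 0 };

	get_page_ref(&page);	/* taken when the rx buffer was carved out */
	/* build_skb() failed: drop exactly the reference this buffer
	 * owned, as the hunk does via put_page(virt_to_page(data)) */
	put_page_ref(&page);
	return 0;
}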
 
@@ -196,7 +168,6 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
        int head, tail;
        u64 buf_addr;
        struct rbdr_entry_t *desc;
-       struct rbuf_info *rinfo;
 
        if (!rbdr)
                return;
@@ -212,16 +183,14 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
        while (head != tail) {
                desc = GET_RBDR_DESC(rbdr, head);
                buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
-               rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
-               put_page(rinfo->page);
+               put_page(virt_to_page(phys_to_virt(buf_addr)));
                head++;
                head &= (rbdr->dmem.q_len - 1);
        }
        /* Free SKB of tail desc */
        desc = GET_RBDR_DESC(rbdr, tail);
        buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
-       rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
-       put_page(rinfo->page);
+       put_page(virt_to_page(phys_to_virt(buf_addr)));
 
        /* Free RBDR ring */
        nicvf_free_q_desc_mem(nic, &rbdr->dmem);
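
The teardown walk above relies on the ring size being a power of two, so wraparound is a mask rather than a modulo, and the tail descriptor is released separately once head catches up to it. A compact sketch of that walk:

#include <stdio.h>

#define Q_LEN 8	/* ring size, must be a power of two */

static void release(int idx) { printf("put_page for desc %d\n", idx); }

int main(void)
{
	int head = 6, tail = 2;	/* example: ring has wrapped around */

	while (head != tail) {
		release(head);
		head = (head + 1) & (Q_LEN - 1);	/* masked wraparound */
	}
	release(tail);	/* tail descriptor is freed after the loop */
	return 0;
}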
@@ -330,7 +299,7 @@ static int nicvf_init_cmp_queue(struct nicvf *nic,
                return err;
 
        cq->desc = cq->dmem.base;
-       cq->thresh = CMP_QUEUE_CQE_THRESH;
+       cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
        nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
 
        return 0;
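
This is the change the commit title refers to: on revisions after pass-1 the completion queue is programmed to raise an interrupt once CMP_QUEUE_CQE_THRESH entries accumulate, while pass-1 silicon keeps the threshold at 0 and relies on the timer alone, since the count-threshold interrupt is not usable there. A hedged sketch of the gate; the revision cutoff and threshold value below are assumptions for illustration, not the driver's actual constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CMP_QUEUE_CQE_THRESH 0x10	/* hypothetical value */

struct pci_dev_stub { uint8_t revision; };	/* stand-in for pci_dev */

/* stand-in for the driver's pass1_silicon() helper; the 0x8 cutoff
 * is an assumed revision boundary, used here only for illustration */
static bool pass1_silicon_stub(const struct pci_dev_stub *pdev)
{
	return pdev->revision < 0x8;
}

static uint32_t cq_threshold(const struct pci_dev_stub *pdev)
{
	/* pass-1 cannot use the CQE count threshold interrupt, so it
	 * stays at 0 there and completions are reaped on timer only */
	return pass1_silicon_stub(pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
}

int main(void)
{
	struct pci_dev_stub pass1 = { .revision = 0x0 };
	struct pci_dev_stub pass2 = { .revision = 0x8 };

	printf("pass1 thresh=%u pass2 thresh=%u\n",
	       (unsigned)cq_threshold(&pass1), (unsigned)cq_threshold(&pass2));
	return 0;
}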
@@ -956,7 +925,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
 {
        int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
 
-       if (skb_shinfo(skb)->gso_size) {
+       if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
                subdesc_cnt = nicvf_tso_count_subdescs(skb);
                return subdesc_cnt;
        }
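
With hardware TSO available, a GSO skb no longer needs the per-segment descriptor estimate; it is posted like any other packet, one header subdesc plus gathers. A sketch of the resulting decision, assuming MIN_SQ_DESC_PER_PKT_XMIT covers the header plus the first gather (the helper below is a stub for the driver's real segment counter):

#include <stdbool.h>
#include <stdio.h>

#define MIN_SQ_DESC_PER_PKT_XMIT 2	/* assumed: header + one gather */

/* stub standing in for nicvf_tso_count_subdescs(), which walks the
 * skb and counts per-segment header and gather descriptors */
static int sw_tso_count_subdescs(void)
{
	return 8;
}

static int subdesc_required(int gso_size, bool hw_tso, int nr_frags)
{
	if (gso_size && !hw_tso)
		return sw_tso_count_subdescs();
	/* hw-TSO and plain packets post one gather per page fragment */
	return MIN_SQ_DESC_PER_PKT_XMIT + nr_frags;
}

int main(void)
{
	printf("sw-tso: %d, hw-tso: %d\n",
	       subdesc_required(1448, false, 3),
	       subdesc_required(1448, true, 3));
	return 0;
}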
@@ -971,7 +940,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
  * First subdescriptor for every send descriptor.
  */
 static inline void
-nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
                         int subdesc_cnt, struct sk_buff *skb, int len)
 {
        int proto;
@@ -1007,6 +976,15 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
                        break;
                }
        }
+
+       if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
+               hdr->tso = 1;
+               hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+               hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
+               /* For non-tunneled pkts, point this to L2 ethertype */
+               hdr->inner_l3_offset = skb_network_offset(skb) - 2;
+               nic->drv_stats.tx_tso++;
+       }
 }
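
For a plain untagged IPv4/TCP frame the new header fields work out as follows: tso_start lands on the first payload byte, and inner_l3_offset backs up two bytes from the network header so it points at the Ethernet ethertype field, matching the comment in the hunk. A worked example:

#include <stdio.h>

int main(void)
{
	/* 14-byte Ethernet, 20-byte IPv4, 20-byte TCP headers */
	int network_offset   = 14;		/* skb_network_offset() */
	int transport_offset = 14 + 20;		/* skb_transport_offset() */
	int tcp_hdrlen       = 20;

	int tso_start       = transport_offset + tcp_hdrlen;	/* 54: first payload byte */
	int inner_l3_offset = network_offset - 2;		/* 12: ethertype field */

	printf("tso_start=%d inner_l3_offset=%d\n",
	       tso_start, inner_l3_offset);
	return 0;
}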
 
 /* SQ GATHER subdescriptor
@@ -1076,7 +1054,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
                        data_left -= size;
                        tso_build_data(skb, &tso, size);
                }
-               nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
+               nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
                                         seg_subdescs - 1, skb, seg_len);
                sq->skbuff[hdr_qentry] = (u64)NULL;
                qentry = nicvf_get_nxt_sqentry(sq, qentry);
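
This is the software-TSO fallback that remains for paths without hw_tso: the payload is split into gso_size chunks, each segment gets its own header and gather subdescs, and intermediate header entries have their skb pointer NULLed (as on the line above) so the completion handler frees the skb exactly once. A userspace analogue of the segmentation loop:

#include <stdio.h>

int main(void)
{
	int payload = 5000, gso_size = 1448, seg = 0;

	while (payload > 0) {
		int seg_len = payload < gso_size ? payload : gso_size;

		/* per segment: one header subdesc, then gather subdescs
		 * covering seg_len bytes of payload */
		printf("segment %d: header + %d data bytes\n", seg++, seg_len);
		payload -= seg_len;
	}
	return 0;
}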
@@ -1129,11 +1107,12 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
        qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
 
        /* Check if its a TSO packet */
-       if (skb_shinfo(skb)->gso_size)
+       if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
                return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
 
        /* Add SQ header subdesc */
-       nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
+       nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
+                                skb, skb->len);
 
        /* Add SQ gather subdescs */
        qentry = nicvf_get_nxt_sqentry(sq, qentry);
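
After the header subdesc, the gather step posts one entry for the skb's linear area and one per page fragment, typically followed by a doorbell write advertising how many subdescriptors were queued. A toy version of that sequence (the fragment layout is made up for illustration):

#include <stdio.h>

struct frag { int len; };

int main(void)
{
	int linear_len = 128;
	struct frag frags[] = { {1024}, {512} };
	int n = sizeof(frags) / sizeof(frags[0]), i;

	printf("gather: linear, %d bytes\n", linear_len);
	for (i = 0; i < n; i++)
		printf("gather: frag %d, %d bytes\n", i, frags[i].len);
	/* a final doorbell write tells hw how many subdescs were queued */
	return 0;
}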