/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include "en.h"
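
/*
 * RX data path.  As implemented below: mlx5e_post_rx_wqes() allocates an
 * skb per receive WQE and posts the WQEs to the hardware; the device DMAs
 * each received packet into its skb and reports a completion;
 * mlx5e_poll_rx_cq() reaps completions, unmaps the buffer, fixes up the
 * skb (LRO header rewrite, checksum status, RSS hash, VLAN tag) and hands
 * it to GRO.  Both entry points are expected to be driven from the
 * channel's NAPI poll handler, which lives outside this file.
 */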
static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
				     struct mlx5e_rx_wqe *wqe, u16 ix)
{
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, MLX5E_NET_IP_ALIGN);

	dma_addr = dma_map_single(rq->pdev,
				  /* hw start padding */
				  skb->data - MLX5E_NET_IP_ALIGN,
				  /* hw   end padding */
				  rq->wqe_sz,
				  DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
		goto err_free_skb;

	*((dma_addr_t *)skb->cb) = dma_addr;
	wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);

	rq->skb[ix] = skb;

	return 0;

err_free_skb:
	dev_kfree_skb(skb);

	return -ENOMEM;
}
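
/*
 * mlx5e_alloc_rx_wqe() stashes the buffer's DMA address in skb->cb (the
 * sk_buff scratch area), which lets the completion path unmap the buffer
 * without keeping a separate per-ring array of addresses.  The mapping
 * starts MLX5E_NET_IP_ALIGN bytes before skb->data; like the generic
 * NET_IP_ALIGN, the extra headroom keeps the IP header naturally aligned
 * once the 14-byte Ethernet header has been pulled.
 */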
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
		return false;

	while (!mlx5_wq_ll_is_full(wq)) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
			break;

		mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
	}

	/* ensure wqes are visible to device before updating doorbell record */
	dma_wmb();

	mlx5_wq_ll_update_db_record(wq);

	return !mlx5_wq_ll_is_full(wq);
}
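
/*
 * The dma_wmb() above orders the WQE writes against the doorbell record
 * update: the device must never observe the new producer index before
 * the WQEs it points at are in memory.  The return value reports whether
 * the ring is still not full (i.e. an allocation failed mid-refill), so
 * the caller can keep NAPI scheduled and retry on the next poll.
 */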
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
{
	struct ethhdr	*eth	= (struct ethhdr *)(skb->data);
	struct iphdr	*ipv4	= (struct iphdr *)(skb->data + ETH_HLEN);
	struct ipv6hdr	*ipv6	= (struct ipv6hdr *)(skb->data + ETH_HLEN);
	struct tcphdr	*tcp;

	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
	int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
		       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

	u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;

	if (eth->h_proto == htons(ETH_P_IP)) {
		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
					sizeof(struct iphdr));
		ipv6 = NULL;
	} else {
		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
					sizeof(struct ipv6hdr));
		ipv4 = NULL;
	}

	if (get_cqe_lro_tcppsh(cqe))
		tcp->psh = 1;

	if (tcp_ack) {
		tcp->ack     = 1;
		tcp->ack_seq = cqe->lro_ack_seq_num;
		tcp->window  = cqe->lro_tcp_win;
	}

	if (ipv4) {
		ipv4->ttl     = cqe->lro_min_ttl;
		ipv4->tot_len = cpu_to_be16(tot_len);
		ipv4->check   = 0;
		ipv4->check   = ip_fast_csum((unsigned char *)ipv4,
					     ipv4->ihl);
	} else {
		ipv6->hop_limit   = cqe->lro_min_ttl;
		ipv6->payload_len = cpu_to_be16(tot_len -
						sizeof(struct ipv6hdr));
	}
}
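
/*
 * LRO hands the stack one large merged packet, so the headers of the
 * first segment no longer describe what follows: the total length,
 * TTL/hop limit, TCP PSH/ACK state and window are patched to match the
 * coalesced result reported in the CQE, and the IPv4 header checksum is
 * recomputed (IPv6 carries no header checksum).  gso_size is filled in
 * by the caller so GRO/GSO still knows the size of the original
 * MSS-sized segments.
 */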
static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
				      struct sk_buff *skb)
{
	u8 cht = cqe->rss_hash_type;
	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
					    PKT_HASH_TYPE_NONE;
	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}
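
/*
 * The CQE reports how much of the packet the RSS hash covered; mapping
 * that onto the kernel's PKT_HASH_TYPE_* levels lets the stack know
 * whether the hash is strong enough for consumers (e.g. flow steering)
 * that require an L4 hash rather than an L3-only one.
 */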
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
				      struct mlx5e_rq *rq,
				      struct sk_buff *skb)
{
	struct net_device *netdev = rq->netdev;
	u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
	int lro_num_seg;

	skb_put(skb, cqe_bcnt);

	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(skb, cqe);
		skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
		rq->stats.lro_packets++;
		rq->stats.lro_bytes += cqe_bcnt;
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (cqe->hds_ip_ext & CQE_L2_OK) &&
	    (cqe->hds_ip_ext & CQE_L3_OK) &&
	    (cqe->hds_ip_ext & CQE_L4_OK)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		rq->stats.csum_none++;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	skb_record_rx_queue(skb, rq->ix);

	if (likely(netdev->features & NETIF_F_RXHASH))
		mlx5e_skb_set_hash(cqe, skb);

	if (cqe_has_vlan(cqe))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(cqe->vlan_info));
}
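
/*
 * CHECKSUM_UNNECESSARY is claimed only when the device vouches for L2,
 * L3 and L4 all at once; anything weaker falls back to CHECKSUM_NONE
 * and lets the stack verify the packet itself (counted in csum_none).
 * The LRO segment count is carried in the top byte of the CQE's srqn
 * field, hence the >> 24 above.
 */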
bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_rq *rq = cq->sqrq;
	int i;

	/* avoid accessing cq (dma coherent memory) if not needed */
	if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
		return false;

	for (i = 0; i < budget; i++) {
		struct mlx5e_rx_wqe *wqe;
		struct mlx5_cqe64 *cqe;
		struct sk_buff *skb;
		__be16 wqe_counter_be;
		u16 wqe_counter;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		wqe_counter_be = cqe->wqe_counter;
		wqe_counter    = be16_to_cpu(wqe_counter_be);
		wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
		skb            = rq->skb[wqe_counter];
		rq->skb[wqe_counter] = NULL;

		dma_unmap_single(rq->pdev,
				 *((dma_addr_t *)skb->cb),
				 rq->wqe_sz,
				 DMA_FROM_DEVICE);

		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
			rq->stats.wqe_err++;
			dev_kfree_skb(skb);
			goto wq_ll_pop;
		}

		mlx5e_build_rx_skb(cqe, rq, skb);
		rq->stats.packets++;
		napi_gro_receive(cq->napi, skb);

wq_ll_pop:
		mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
			       &wqe->next.next_wqe_index);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	if (i == budget) {
		set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
		return true;
	}

	return false;
}
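
/*
 * Returning true means the full NAPI budget was consumed and more
 * completions may be pending; re-setting MLX5E_CQ_HAS_CQES makes the
 * next poll re-enter the loop instead of taking the cheap early-out at
 * the top.  A minimal sketch of how the channel's NAPI handler (which
 * lives elsewhere in the driver) is expected to drive the two entry
 * points in this file, assuming a channel struct c with an rq member:
 *
 *	busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget);
 *	busy |= mlx5e_post_rx_wqes(&c->rq);
 *	if (!busy) {
 *		napi_complete(napi);
 *		// re-arm the CQ so the next completion raises an IRQ
 *	}
 */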