/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/ipv6.h>
#include <linux/tcp.h>
38 static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq
*rq
,
39 struct mlx5e_rx_wqe
*wqe
, u16 ix
)
44 skb
= netdev_alloc_skb(rq
->netdev
, rq
->wqe_sz
);
48 dma_addr
= dma_map_single(rq
->pdev
,
49 /* hw start padding */
55 if (unlikely(dma_mapping_error(rq
->pdev
, dma_addr
)))
58 skb_reserve(skb
, MLX5E_NET_IP_ALIGN
);
60 *((dma_addr_t
*)skb
->cb
) = dma_addr
;
61 wqe
->data
.addr
= cpu_to_be64(dma_addr
+ MLX5E_NET_IP_ALIGN
);
73 bool mlx5e_post_rx_wqes(struct mlx5e_rq
*rq
)
75 struct mlx5_wq_ll
*wq
= &rq
->wq
;
77 if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE
, &rq
->state
)))
80 while (!mlx5_wq_ll_is_full(wq
)) {
81 struct mlx5e_rx_wqe
*wqe
= mlx5_wq_ll_get_wqe(wq
, wq
->head
);
83 if (unlikely(mlx5e_alloc_rx_wqe(rq
, wqe
, wq
->head
)))
86 mlx5_wq_ll_push(wq
, be16_to_cpu(wqe
->next
.next_wqe_index
));
89 /* ensure wqes are visible to device before updating doorbell record */
92 mlx5_wq_ll_update_db_record(wq
);
94 return !mlx5_wq_ll_is_full(wq
);
97 static void mlx5e_lro_update_hdr(struct sk_buff
*skb
, struct mlx5_cqe64
*cqe
)
99 struct ethhdr
*eth
= (struct ethhdr
*)(skb
->data
);
100 struct iphdr
*ipv4
= (struct iphdr
*)(skb
->data
+ ETH_HLEN
);
101 struct ipv6hdr
*ipv6
= (struct ipv6hdr
*)(skb
->data
+ ETH_HLEN
);
104 u8 l4_hdr_type
= get_cqe_l4_hdr_type(cqe
);
105 int tcp_ack
= ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA
== l4_hdr_type
) ||
106 (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA
== l4_hdr_type
));
108 u16 tot_len
= be32_to_cpu(cqe
->byte_cnt
) - ETH_HLEN
;
110 if (eth
->h_proto
== htons(ETH_P_IP
)) {
111 tcp
= (struct tcphdr
*)(skb
->data
+ ETH_HLEN
+
112 sizeof(struct iphdr
));
114 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV4
;
116 tcp
= (struct tcphdr
*)(skb
->data
+ ETH_HLEN
+
117 sizeof(struct ipv6hdr
));
119 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV6
;
122 if (get_cqe_lro_tcppsh(cqe
))
127 tcp
->ack_seq
= cqe
->lro_ack_seq_num
;
128 tcp
->window
= cqe
->lro_tcp_win
;
132 ipv4
->ttl
= cqe
->lro_min_ttl
;
133 ipv4
->tot_len
= cpu_to_be16(tot_len
);
135 ipv4
->check
= ip_fast_csum((unsigned char *)ipv4
,
138 ipv6
->hop_limit
= cqe
->lro_min_ttl
;
139 ipv6
->payload_len
= cpu_to_be16(tot_len
-
140 sizeof(struct ipv6hdr
));
144 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64
*cqe
,
147 u8 cht
= cqe
->rss_hash_type
;
148 int ht
= (cht
& CQE_RSS_HTYPE_L4
) ? PKT_HASH_TYPE_L4
:
149 (cht
& CQE_RSS_HTYPE_IP
) ? PKT_HASH_TYPE_L3
:
151 skb_set_hash(skb
, be32_to_cpu(cqe
->rss_hash_result
), ht
);
154 static inline bool is_first_ethertype_ip(struct sk_buff
*skb
)
156 __be16 ethertype
= ((struct ethhdr
*)skb
->data
)->h_proto
;
158 return (ethertype
== htons(ETH_P_IP
) || ethertype
== htons(ETH_P_IPV6
));
161 static inline void mlx5e_handle_csum(struct net_device
*netdev
,
162 struct mlx5_cqe64
*cqe
,
166 if (unlikely(!(netdev
->features
& NETIF_F_RXCSUM
)))
169 if (likely(cqe
->hds_ip_ext
& CQE_L4_OK
)) {
170 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
171 } else if (is_first_ethertype_ip(skb
)) {
172 skb
->ip_summed
= CHECKSUM_COMPLETE
;
173 skb
->csum
= csum_unfold(cqe
->check_sum
);
182 skb
->ip_summed
= CHECKSUM_NONE
;
183 rq
->stats
.csum_none
++;
186 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64
*cqe
,
190 struct net_device
*netdev
= rq
->netdev
;
191 u32 cqe_bcnt
= be32_to_cpu(cqe
->byte_cnt
);
194 skb_put(skb
, cqe_bcnt
);
196 lro_num_seg
= be32_to_cpu(cqe
->srqn
) >> 24;
197 if (lro_num_seg
> 1) {
198 mlx5e_lro_update_hdr(skb
, cqe
);
199 skb_shinfo(skb
)->gso_size
= DIV_ROUND_UP(cqe_bcnt
, lro_num_seg
);
200 rq
->stats
.lro_packets
++;
201 rq
->stats
.lro_bytes
+= cqe_bcnt
;
204 mlx5e_handle_csum(netdev
, cqe
, rq
, skb
);
206 skb
->protocol
= eth_type_trans(skb
, netdev
);
208 skb_record_rx_queue(skb
, rq
->ix
);
210 if (likely(netdev
->features
& NETIF_F_RXHASH
))
211 mlx5e_skb_set_hash(cqe
, skb
);
213 if (cqe_has_vlan(cqe
))
214 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
215 be16_to_cpu(cqe
->vlan_info
));
218 bool mlx5e_poll_rx_cq(struct mlx5e_cq
*cq
, int budget
)
220 struct mlx5e_rq
*rq
= container_of(cq
, struct mlx5e_rq
, cq
);
223 /* avoid accessing cq (dma coherent memory) if not needed */
224 if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES
, &cq
->flags
))
227 for (i
= 0; i
< budget
; i
++) {
228 struct mlx5e_rx_wqe
*wqe
;
229 struct mlx5_cqe64
*cqe
;
231 __be16 wqe_counter_be
;
234 cqe
= mlx5e_get_cqe(cq
);
238 mlx5_cqwq_pop(&cq
->wq
);
240 wqe_counter_be
= cqe
->wqe_counter
;
241 wqe_counter
= be16_to_cpu(wqe_counter_be
);
242 wqe
= mlx5_wq_ll_get_wqe(&rq
->wq
, wqe_counter
);
243 skb
= rq
->skb
[wqe_counter
];
245 rq
->skb
[wqe_counter
] = NULL
;
247 dma_unmap_single(rq
->pdev
,
248 *((dma_addr_t
*)skb
->cb
),
252 if (unlikely((cqe
->op_own
>> 4) != MLX5_CQE_RESP_SEND
)) {
258 mlx5e_build_rx_skb(cqe
, rq
, skb
);
260 napi_gro_receive(cq
->napi
, skb
);
263 mlx5_wq_ll_pop(&rq
->wq
, wqe_counter_be
,
264 &wqe
->next
.next_wqe_index
);
267 mlx5_cqwq_update_db_record(&cq
->wq
);
269 /* ensure cq space is freed before enabling more cqes */
273 set_bit(MLX5E_CQ_HAS_CQES
, &cq
->flags
);