/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/busy_poll.h>
#include "en.h"
#include "en_tc.h"

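/* Returns true when hardware RX timestamping is enabled for all incoming
 * packets (HWTSTAMP_FILTER_ALL), in which case the CQE timestamp is
 * propagated to each received skb.
 */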
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
        return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
}

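/* Allocate and DMA-map one skb for a legacy RQ entry.  The mapping
 * address is stashed in skb->cb so the completion handler can unmap it,
 * and the WQE address skips MLX5E_NET_IP_ALIGN bytes of headroom so the
 * IP header lands on an aligned boundary.
 */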
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
        struct sk_buff *skb;
        dma_addr_t dma_addr;

        skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
        if (unlikely(!skb))
                return -ENOMEM;

        dma_addr = dma_map_single(rq->pdev,
                                  /* hw start padding */
                                  skb->data,
                                  /* hw end padding */
                                  rq->wqe_sz,
                                  DMA_FROM_DEVICE);

        if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
                goto err_free_skb;

        skb_reserve(skb, MLX5E_NET_IP_ALIGN);

        *((dma_addr_t *)skb->cb) = dma_addr;
        wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);

        rq->skb[ix] = skb;

        return 0;

err_free_skb:
        dev_kfree_skb(skb);

        return -ENOMEM;
}

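/* Allocate the backing store for one multi-packet WQE (striding RQ): a
 * single DMA-mapped high-order page that the device fills in fixed-size
 * strides, letting many packets share one receive descriptor.
 */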
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
        struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
        gfp_t gfp_mask;
        int i;

        gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
        wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
                                             MLX5_MPWRQ_WQE_PAGE_ORDER);
        if (unlikely(!wi->dma_info.page))
                return -ENOMEM;

        wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
                                         rq->wqe_sz, PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
                put_page(wi->dma_info.page);
                return -ENOMEM;
        }

        /* We split the high-order page into order-0 ones and manage their
         * reference counters to minimize the memory held by small skb
         * fragments.
         */
        split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
                atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE,
                           &wi->dma_info.page[i]._count);
                wi->skbs_frags[i] = 0;
        }

        wi->consumed_strides = 0;
        wqe->data.addr = cpu_to_be64(wi->dma_info.addr);

        return 0;
}

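/* Refill the RQ until it is full or an allocation fails.  The dma_wmb()
 * orders the WQE writes against the doorbell record update so the device
 * cannot see a descriptor before its contents are visible.  Returns true
 * if the ring is still not full, i.e. refilling must be retried.
 */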
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->wq;

        if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
                return false;

        while (!mlx5_wq_ll_is_full(wq)) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

                if (unlikely(rq->alloc_wqe(rq, wqe, wq->head)))
                        break;

                mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
        }

        /* ensure wqes are visible to device before updating doorbell record */
        dma_wmb();

        mlx5_wq_ll_update_db_record(wq);

        return !mlx5_wq_ll_is_full(wq);
}

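/* Hardware LRO delivers one coalesced super-packet but leaves the
 * original headers of the first segment in place.  Patch the IP
 * total/payload length, TTL/hop limit, IPv4 header checksum and the TCP
 * PSH/ACK/window fields so the merged frame looks consistent to the
 * stack, and mark the matching GSO type.
 */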
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
                                 u32 cqe_bcnt)
{
        struct ethhdr *eth = (struct ethhdr *)(skb->data);
        struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
        struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
        struct tcphdr *tcp;

        u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
        int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
                       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

        u16 tot_len = cqe_bcnt - ETH_HLEN;

        if (eth->h_proto == htons(ETH_P_IP)) {
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
                                        sizeof(struct iphdr));
                ipv6 = NULL;
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        } else {
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
                                        sizeof(struct ipv6hdr));
                ipv4 = NULL;
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
        }

        if (get_cqe_lro_tcppsh(cqe))
                tcp->psh = 1;

        if (tcp_ack) {
                tcp->ack = 1;
                tcp->ack_seq = cqe->lro_ack_seq_num;
                tcp->window = cqe->lro_tcp_win;
        }

        if (ipv4) {
                ipv4->ttl = cqe->lro_min_ttl;
                ipv4->tot_len = cpu_to_be16(tot_len);
                ipv4->check = 0;
                ipv4->check = ip_fast_csum((unsigned char *)ipv4,
                                           ipv4->ihl);
        } else {
                ipv6->hop_limit = cqe->lro_min_ttl;
                ipv6->payload_len = cpu_to_be16(tot_len -
                                                sizeof(struct ipv6hdr));
        }
}

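/* Map the CQE RSS hash type onto the kernel packet-hash levels (L4, L3
 * or none) and record the hash in the skb.
 */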
static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
                                      struct sk_buff *skb)
{
        u8 cht = cqe->rss_hash_type;
        int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
                 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
                                            PKT_HASH_TYPE_NONE;
        skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

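/* True only when the outermost ethertype is IPv4 or IPv6, i.e. no VLAN
 * tag or other encapsulation sits before the IP header.
 */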
static inline bool is_first_ethertype_ip(struct sk_buff *skb)
{
        __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;

        return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
}

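/* Set the checksum verdict: LRO frames were already verified by hardware
 * (CHECKSUM_UNNECESSARY); for plain IPv4/IPv6 frames the checksum carried
 * in the CQE is reported as CHECKSUM_COMPLETE.  Everything else, e.g.
 * frames where a VLAN tag precedes the IP header, falls back to
 * CHECKSUM_NONE.
 */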
static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct mlx5_cqe64 *cqe,
                                     struct mlx5e_rq *rq,
                                     struct sk_buff *skb,
                                     bool lro)
{
        if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
                goto csum_none;

        if (lro) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if (likely(is_first_ethertype_ip(skb))) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
                rq->stats.csum_sw++;
        } else {
                goto csum_none;
        }

        return;

csum_none:
        skb->ip_summed = CHECKSUM_NONE;
        rq->stats.csum_none++;
}

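/* Finish building the skb from the CQE: LRO header fixup and gso_size,
 * optional hardware timestamp, checksum verdict, protocol, RX queue
 * record, RSS hash, VLAN tag and the flow mark used by TC.
 */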
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                      u32 cqe_bcnt,
                                      struct mlx5e_rq *rq,
                                      struct sk_buff *skb)
{
        struct net_device *netdev = rq->netdev;
        struct mlx5e_tstamp *tstamp = rq->tstamp;
        int lro_num_seg;

        lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
                skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
                rq->stats.lro_packets++;
                rq->stats.lro_bytes += cqe_bcnt;
        }

        if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
                mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));

        mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);

        skb->protocol = eth_type_trans(skb, netdev);

        skb_record_rx_queue(skb, rq->ix);

        if (likely(netdev->features & NETIF_F_RXHASH))
                mlx5e_skb_set_hash(cqe, skb);

        if (cqe_has_vlan(cqe))
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       be16_to_cpu(cqe->vlan_info));

        skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
}

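/* Completion tail shared by both RQ types: account the packet and hand
 * the finished skb to GRO.
 */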
static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
                                         struct mlx5_cqe64 *cqe,
                                         u32 cqe_bcnt,
                                         struct sk_buff *skb)
{
        rq->stats.packets++;
        rq->stats.bytes += cqe_bcnt;
        mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
        napi_gro_receive(rq->cq.napi, skb);
}

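/* Completion handler for the legacy RQ, where one CQE corresponds to one
 * WQE/skb.  The skb is unmapped and, on success, grown to the received
 * byte count and completed; the WQE is returned to the free list in
 * either case.
 */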
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        struct mlx5e_rx_wqe *wqe;
        struct sk_buff *skb;
        __be16 wqe_counter_be;
        u16 wqe_counter;
        u32 cqe_bcnt;

        wqe_counter_be = cqe->wqe_counter;
        wqe_counter = be16_to_cpu(wqe_counter_be);
        wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
        skb = rq->skb[wqe_counter];
        prefetch(skb->data);
        rq->skb[wqe_counter] = NULL;

        dma_unmap_single(rq->pdev,
                         *((dma_addr_t *)skb->cb),
                         rq->wqe_sz,
                         DMA_FROM_DEVICE);

        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
                rq->stats.wqe_err++;
                dev_kfree_skb(skb);
                goto wq_ll_pop;
        }

        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
        skb_put(skb, cqe_bcnt);

        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

wq_ll_pop:
        mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
                       &wqe->next.next_wqe_index);
}

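/* Completion handler for the striding RQ.  Packet headers (up to
 * MLX5_MPWRQ_SMALL_PACKET_THRESHOLD bytes) are copied into the skb's
 * linear part while the remainder is attached as page fragments that
 * consume references pre-taken in mlx5e_alloc_rx_mpwqe().  The WQE and
 * its pages are released only once all strides have been consumed.
 */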
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
        u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
        u16 wqe_id = be16_to_cpu(cqe->wqe_id);
        struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
        struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
        struct sk_buff *skb;
        u32 consumed_bytes;
        u32 head_offset;
        u32 frag_offset;
        u32 wqe_offset;
        u32 page_idx;
        u16 byte_cnt;
        u16 cqe_bcnt;
        u16 headlen;
        int i;

        wi->consumed_strides += cstrides;

        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
                rq->stats.wqe_err++;
                goto mpwrq_cqe_out;
        }

        if (unlikely(mpwrq_is_filler_cqe(cqe))) {
                rq->stats.mpwqe_filler++;
                goto mpwrq_cqe_out;
        }

        skb = netdev_alloc_skb(rq->netdev,
                               ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
                                     sizeof(long)));
        if (unlikely(!skb))
                goto mpwrq_cqe_out;

        prefetch(skb->data);
        wqe_offset = stride_ix * MLX5_MPWRQ_STRIDE_SIZE;
        consumed_bytes = cstrides * MLX5_MPWRQ_STRIDE_SIZE;
        dma_sync_single_for_cpu(rq->pdev, wi->dma_info.addr + wqe_offset,
                                consumed_bytes, DMA_FROM_DEVICE);

        head_offset = wqe_offset & (PAGE_SIZE - 1);
        page_idx = wqe_offset >> PAGE_SHIFT;
        cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
        headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
        frag_offset = head_offset + headlen;

        byte_cnt = cqe_bcnt - headlen;
        while (byte_cnt) {
                u32 pg_consumed_bytes =
                        min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
                unsigned int truesize =
                        ALIGN(pg_consumed_bytes, MLX5_MPWRQ_STRIDE_SIZE);

                wi->skbs_frags[page_idx]++;
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                &wi->dma_info.page[page_idx], frag_offset,
                                pg_consumed_bytes, truesize);
                byte_cnt -= pg_consumed_bytes;
                frag_offset = 0;
                page_idx++;
        }

        skb_copy_to_linear_data(skb,
                                page_address(wi->dma_info.page) + wqe_offset,
                                ALIGN(headlen, sizeof(long)));
        /* skb linear part was allocated with headlen and aligned to long */
        skb->tail += headlen;
        skb->len += headlen;

        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

mpwrq_cqe_out:
        if (likely(wi->consumed_strides < MLX5_MPWRQ_NUM_STRIDES))
                return;

        dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
                       PCI_DMA_FROMDEVICE);
        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
                atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i],
                           &wi->dma_info.page[i]._count);
                put_page(&wi->dma_info.page[i]);
        }
        mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

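/* NAPI poll for the RX CQ: process up to @budget completions, then
 * update the CQ doorbell record.  The wmb() makes sure the freed CQ
 * space is visible before the device is allowed to produce more CQEs.
 */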
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        int work_done;

        for (work_done = 0; work_done < budget; work_done++) {
                struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);

                if (!cqe)
                        break;

                mlx5_cqwq_pop(&cq->wq);

                rq->handle_rx_cqe(rq, cqe);
        }

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        return work_done;
}