/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <net/busy_poll.h>
#include <linux/bpf.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"
static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
			    struct mlx4_en_rx_alloc *page_alloc,
			    const struct mlx4_en_frag_info *frag_info,
			    gfp_t _gfp)
{
	int order;
	struct page *page;
	dma_addr_t dma;

	for (order = frag_info->order; ;) {
		gfp_t gfp = _gfp;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
		page = alloc_pages(gfp, order);
		if (likely(page))
			break;
		if (--order < 0 ||
		    ((PAGE_SIZE << order) < frag_info->frag_size))
			return -ENOMEM;
	}
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
			   frag_info->dma_dir);
	if (dma_mapping_error(priv->ddev, dma)) {
		put_page(page);
		return -ENOMEM;
	}
	page_alloc->page_size = PAGE_SIZE << order;
	page_alloc->page = page;
	page_alloc->dma = dma;
	page_alloc->page_offset = 0;
	/* Not doing get_page() for each frag is a big win
	 * on asymmetric workloads. Note we can not use atomic_set().
	 */
	page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1);
	return 0;
}
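
/* Worked example for the reference-count trick above: with a 4 KiB page
 * (order 0) and a 2 KiB frag_stride, page_size / frag_stride == 2, so
 * page_ref_add() charges one extra reference and the page count becomes 2.
 * Each consumed stride is later released with a single put_page(), and the
 * page returns to the allocator exactly when its last stride is freed.
 */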
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       struct mlx4_en_rx_alloc *ring_alloc,
			       gfp_t gfp)
{
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	const struct mlx4_en_frag_info *frag_info;
	struct page *page;
	dma_addr_t dma;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		frag_info = &priv->frag_info[i];
		page_alloc[i] = ring_alloc[i];
		page_alloc[i].page_offset += frag_info->frag_stride;

		if (page_alloc[i].page_offset + frag_info->frag_stride <=
		    ring_alloc[i].page_size)
			continue;

		if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
			goto out;
	}

	for (i = 0; i < priv->num_frags; i++) {
		frags[i] = ring_alloc[i];
		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
		ring_alloc[i] = page_alloc[i];
		rx_desc->data[i].addr = cpu_to_be64(dma);
	}

	return 0;

out:
	while (i--) {
		if (page_alloc[i].page != ring_alloc[i].page) {
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
				       page_alloc[i].page_size,
				       priv->frag_info[i].dma_dir);
			page = page_alloc[i].page;
			/* Revert changes done by mlx4_alloc_pages */
			page_ref_sub(page, page_alloc[i].page_size /
					   priv->frag_info[i].frag_stride - 1);
			put_page(page);
		}
	}
	return -ENOMEM;
}
static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frags,
			      int i)
{
	const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;

	if (next_frag_end > frags[i].page_size)
		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
			       frag_info->dma_dir);

	if (frags[i].page)
		put_page(frags[i].page);
}
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	int i;
	struct mlx4_en_rx_alloc *page_alloc;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
				     frag_info, GFP_KERNEL | __GFP_COLD))
			goto out;

		en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
		       i, ring->page_alloc[i].page_size,
		       page_ref_count(ring->page_alloc[i].page));
	}
	return 0;

out:
	while (i--) {
		struct page *page;

		page_alloc = &ring->page_alloc[i];
		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size,
			       priv->frag_info[i].dma_dir);
		page = page_alloc->page;
		/* Revert changes done by mlx4_alloc_pages */
		page_ref_sub(page, page_alloc->page_size /
				   priv->frag_info[i].frag_stride - 1);
		put_page(page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}
static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, frag_info->dma_dir);
		while (page_alloc->page_offset + frag_info->frag_stride <
		       page_alloc->page_size) {
			put_page(page_alloc->page);
			page_alloc->page_offset += frag_info->frag_stride;
		}
		page_alloc->page = NULL;
	}
}
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					(index << priv->log_rx_info);

	if (ring->page_cache.index > 0) {
		frags[0] = ring->page_cache.buf[--ring->page_cache.index];
		rx_desc->data[0].addr = cpu_to_be64(frags[0].dma);
		return 0;
	}

	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}
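
/* Note the fast path above: a descriptor refilled from the page cache reuses
 * the {page, dma, page_offset} triple saved by mlx4_en_rx_recycle(), so no
 * page allocation or DMA mapping work is needed on that path.
 */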
static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
{
	return ring->prod == ring->cons;
}
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
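
/* The doorbell record written above is a plain coherent memory word that the
 * HCA reads; presumably only the low 16 bits of the producer index are
 * meaningful to the hardware, hence the 0xffff mask.
 */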
static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags, nr);
	}
}
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL | __GFP_COLD)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	while (!mlx4_en_is_ring_empty(ring)) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
		++ring->cons;
	}
}
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs,
			      netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vmalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vmalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			if (ring->stride <= TXBB_SIZE)
				ring->buf -= TXBB_SIZE;
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}
/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_cq), which tries to allocate
 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
	int ring;

	if (!priv->port_up)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
			napi_reschedule(&priv->rx_cq[ring]->napi);
	}
}
/* When the rx ring is running in page-per-packet mode, a released frame can go
 * directly into a small cache, to avoid unmapping or touching the page
 * allocator. In bpf prog performance scenarios, buffers are either forwarded
 * or dropped, never converted to skbs, so every page can come directly from
 * this cache when it is sized to be a multiple of the napi budget.
 */
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
			struct mlx4_en_rx_alloc *frame)
{
	struct mlx4_en_page_cache *cache = &ring->page_cache;

	if (cache->index >= MLX4_EN_CACHE_SIZE)
		return false;

	cache->buf[cache->index++] = *frame;
	return true;
}
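
/* When the cache is full the caller falls back to the regular free path
 * (DMA unmap plus put_page); sizing MLX4_EN_CACHE_SIZE as a multiple of the
 * NAPI budget, per the comment above, lets a pure XDP drop/forward workload
 * serve every refill from this cache.
 */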
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	struct bpf_prog *old_prog;

	old_prog = READ_ONCE(ring->xdp_prog);
	if (old_prog)
		bpf_prog_put(old_prog);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->page_cache.index; i++) {
		struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];

		dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
			       priv->frag_info[0].dma_dir);
		put_page(frame->page);
	}
	ring->page_cache.index = 0;
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
	mlx4_en_destroy_allocator(priv, ring);
}
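
/* Using priv->frag_info[0].dma_dir for the cached frames above is safe
 * because the page cache is only populated in XDP (page-per-packet) mode,
 * where num_frags == 1 and frag 0 describes the whole buffer.
 */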
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;
		if (unlikely(!frags[nr].page))
			goto fail;

		dma = be64_to_cpu(rx_desc->data[nr].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
					DMA_FROM_DEVICE);

		/* Save page reference in skb */
		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
		skb_frags_rx[nr].page_offset = frags[nr].page_offset;
		skb->truesize += frag_info->frag_stride;
		frags[nr].page = NULL;
	}
	/* Adjust size of last fragment to match actual length */
	if (nr > 0)
		skb_frag_size_set(&skb_frags_rx[nr - 1],
			length - priv->frag_info[nr - 1].frag_prefix_size);
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(&skb_frags_rx[nr]);
	}
	return 0;
}
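
/* skb->truesize above is charged with the full frag_stride rather than the
 * received length: the whole stride of the backing page stays pinned for as
 * long as the skb fragment holds its page reference.
 */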
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct mlx4_en_rx_alloc *frags,
				      unsigned int length)
{
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;

	/* Get pointer to first fragment so we can copy the headers into the
	 * (linear part of the) skb */
	va = page_address(frags[0].page) + frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		skb->tail += length;
	} else {
		unsigned int pull_len;

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
						      skb, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, pull_len);
		skb->tail += pull_len;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += pull_len;

		/* Adjust size of first fragment */
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
		skb->data_len = length - pull_len;
	}
	return skb;
}
static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
	int i;
	int offset = ETH_HLEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	dev_kfree_skb_any(skb);
}
static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	int index = ring->prod & ring->size_mask;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		if (mlx4_en_prepare_rx_desc(priv, ring, index,
					    GFP_ATOMIC | __GFP_COLD))
			break;
		ring->prod++;
		index = ring->prod & ring->size_mask;
	}
}
/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	return csum_add(hw_checksum, *(__wsum *)vlanh);
}
/* Although the stack expects checksum which doesn't include the pseudo
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
				struct iphdr *iph)
{
	__u16 length_for_csum = 0;
	__wsum csum_pseudo_header = 0;

	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						length_for_csum, iph->protocol, 0);
	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
}
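
/* In other words: the CQE checksum covers pseudo-header + L4 payload, while
 * CHECKSUM_COMPLETE expects a sum over the L4 payload only, so
 *   skb->csum = hw_checksum - csum(saddr, daddr, length, protocol)
 * with the subtraction done in one's-complement arithmetic by csum_sub().
 */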
#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, besides subtracting the pseudo header checksum,
 * we also compute/add the IP header checksum which
 * is not added by the HW.
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct ipv6hdr *ipv6h)
{
	__wsum csum_pseudo_hdr = 0;

	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
		return -1;
	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));

	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));

	skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
	skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
	return 0;
}
#endif
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
{
	__wsum hw_checksum = 0;

	void *hdr = (u8 *)va + sizeof(struct ethhdr);

	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
		hdr += sizeof(struct vlan_hdr);
	}

	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
		if (get_fixed_ipv6_csum(hw_checksum, skb, hdr))
			return -1;
#endif
	return 0;
}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_alloc *frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct bpf_prog *xdp_prog;
	int doorbell_pending;
	struct sk_buff *skb;
	int tx_index;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;
	int factor = priv->cqe_factor;
	u64 timestamp;
	bool l2_tunnel;

	if (!priv->port_up)
		return 0;

	if (budget <= 0)
		return polled;

	xdp_prog = READ_ONCE(ring->xdp_prog);
	doorbell_pending = 0;
	tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {
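		/* Ownership test: the owner bit of a CQE flips on every wrap
		 * of the consumer index, so XNOR of the CQE's owner bit with
		 * the wrap bit of cons_index is true only for entries that
		 * hardware has already completed.
		 */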
		frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		dma_rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet if SRIOV is not enabled
		 * and not performing the selftest or flb disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			struct ethhdr *ethh;
			dma_addr_t dma;
			/* Get pointer to first fragment since we don't have
			 * an skb yet and cast it to ethhdr struct
			 */
			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);
			ethh = (struct ethhdr *)(page_address(frags[0].page) +
						 frags[0].page_offset);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				rcu_read_lock();
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source)) {
						rcu_read_unlock();
						goto next;
					}
				}
				rcu_read_unlock();
			}
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;
		ring->bytes += length;
		ring->packets++;
		l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
			(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));

		/* A bpf program gets first chance to drop the packet. It may
		 * read bytes but not past the end of the frag.
		 */
		if (xdp_prog) {
			struct xdp_buff xdp;
			dma_addr_t dma;
			u32 act;

			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma,
						priv->frag_info[0].frag_size,
						DMA_FROM_DEVICE);

			xdp.data = page_address(frags[0].page) +
						frags[0].page_offset;
			xdp.data_end = xdp.data + length;

			act = bpf_prog_run_xdp(xdp_prog, &xdp);
			switch (act) {
			case XDP_PASS:
				break;
			case XDP_TX:
				if (!mlx4_en_xmit_frame(frags, dev,
							length, tx_index,
							&doorbell_pending))
					goto consumed;
				goto next;
			default:
				bpf_warn_invalid_xdp_action(act);
			case XDP_ABORTED:
			case XDP_DROP:
				if (mlx4_en_rx_recycle(ring, frags))
					goto consumed;
				goto next;
			}
		}

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						      MLX4_CQE_STATUS_UDP)) {
				if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
				    cqe->checksum == cpu_to_be16(0xffff)) {
					ip_summed = CHECKSUM_UNNECESSARY;
					ring->csum_ok++;
				} else {
					ip_summed = CHECKSUM_NONE;
					ring->csum_none++;
				}
			} else {
				if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
				    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
							       MLX4_CQE_STATUS_IPV6))) {
					ip_summed = CHECKSUM_COMPLETE;
					ring->csum_complete++;
				} else {
					ip_summed = CHECKSUM_NONE;
					ring->csum_none++;
				}
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			ring->csum_none++;
		}

		/* This packet is eligible for GRO if it is:
		 * - DIX Ethernet (type interpretation)
		 * - TCP/IP (v4)
		 * - without IP options
		 * - not an IP fragment
		 */
		if (dev->features & NETIF_F_GRO) {
			struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
			if (!gro_skb)
				goto next;

			nr = mlx4_en_complete_rx_desc(priv,
				rx_desc, frags, gro_skb,
				length);
			if (unlikely(!nr))
				goto next;

			if (ip_summed == CHECKSUM_COMPLETE) {
				void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
				if (check_csum(cqe, gro_skb, va,
					       dev->features)) {
					ip_summed = CHECKSUM_NONE;
					ring->csum_none++;
					ring->csum_complete--;
				}
			}

			skb_shinfo(gro_skb)->nr_frags = nr;
			gro_skb->len = length;
			gro_skb->data_len = length;
			gro_skb->ip_summed = ip_summed;

			if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
				gro_skb->csum_level = 1;

			if ((cqe->vlan_my_qpn &
			    cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
			    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
				u16 vid = be16_to_cpu(cqe->sl_vid);

				__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
			} else if ((be32_to_cpu(cqe->vlan_my_qpn) &
				  MLX4_CQE_SVLAN_PRESENT_MASK) &&
				 (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
				__vlan_hwaccel_put_tag(gro_skb,
						       htons(ETH_P_8021AD),
						       be16_to_cpu(cqe->sl_vid));
			}

			if (dev->features & NETIF_F_RXHASH)
				skb_set_hash(gro_skb,
					     be32_to_cpu(cqe->immed_rss_invalid),
					     (ip_summed == CHECKSUM_UNNECESSARY) ?
						PKT_HASH_TYPE_L4 :
						PKT_HASH_TYPE_L3);

			skb_record_rx_queue(gro_skb, cq->ring);

			if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
				timestamp = mlx4_en_get_cqe_ts(cqe);
				mlx4_en_fill_hwtstamps(mdev,
						       skb_hwtstamps(gro_skb),
						       timestamp);
			}

			napi_gro_frags(&cq->napi);
			goto next;
		}

		/* GRO not possible, complete processing here */
		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
		if (!skb) {
			ring->dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		if (ip_summed == CHECKSUM_COMPLETE) {
			if (check_csum(cqe, skb, skb->data, dev->features)) {
				ip_summed = CHECKSUM_NONE;
				ring->csum_complete--;
				ring->csum_none++;
			}
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
			skb->csum_level = 1;

		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     (ip_summed == CHECKSUM_UNNECESSARY) ?
					PKT_HASH_TYPE_L4 :
					PKT_HASH_TYPE_L3);

		if ((be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_CVLAN_PRESENT_MASK) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
		else if ((be32_to_cpu(cqe->vlan_my_qpn) &
			  MLX4_CQE_SVLAN_PRESENT_MASK) &&
			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
					       be16_to_cpu(cqe->sl_vid));

		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
			timestamp = mlx4_en_get_cqe_ts(cqe);
			mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
					       timestamp);
		}

		napi_gro_receive(&cq->napi, skb);
next:
		for (nr = 0; nr < priv->num_frags; nr++)
			mlx4_en_free_frag(priv, frags, nr);

consumed:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (++polled == budget)
			goto out;
	}

out:
	if (doorbell_pending)
		mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]);

	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	mlx4_en_refill_rx_buffers(priv, ring);
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}
/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget) {
		const struct cpumask *aff;
		struct irq_data *idata;
		int cpu_curr;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		idata = irq_desc_get_irq_data(cq->irq_desc);
		aff = irq_data_get_affinity_mask(idata);

		if (likely(cpumask_test_cpu(cpu_curr, aff)))
			return budget;

		/* Current cpu is not according to smp_irq_affinity -
		 * probably affinity changed. Need to stop this NAPI
		 * poll, and restart it on the right CPU.
		 */
		done = 0;
	}
	/* Done for now */
	napi_complete_done(napi, done);
	mlx4_en_arm_cq(priv, cq);
	return done;
}
static const int frag_sizes[] = {
	FRAG_SZ0,
	FRAG_SZ1,
	FRAG_SZ2,
	FRAG_SZ3
};
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	enum dma_data_direction dma_dir = PCI_DMA_FROMDEVICE;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
	int order = MLX4_EN_ALLOC_PREFER_ORDER;
	u32 align = SMP_CACHE_BYTES;
	int buf_size = 0;
	int i = 0;

	/* bpf requires buffers to be set up as 1 packet per page.
	 * This only works when num_frags == 1.
	 */
	if (priv->xdp_ring_num) {
		dma_dir = PCI_DMA_BIDIRECTIONAL;
		/* This will gain efficient xdp frame recycling at the expense
		 * of more costly truesize accounting
		 */
		align = PAGE_SIZE;
		order = 0;
	}

	while (buf_size < eff_mtu) {
		priv->frag_info[i].order = order;
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		priv->frag_info[i].frag_stride =
				ALIGN(priv->frag_info[i].frag_size, align);
		priv->frag_info[i].dma_dir = dma_dir;
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_err(priv,
		       " frag:%d - size:%d prefix:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_prefix_size,
		       priv->frag_info[i].frag_stride);
	}
}
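
/* Example (assuming the default FRAG_SZ0..FRAG_SZ3 values from mlx4_en.h):
 * a standard 1500 byte MTU fits entirely in the first frag_sizes entry, so
 * num_frags == 1; a 9000 byte jumbo MTU is split across several entries,
 * with each frag_prefix_size holding the running byte total so that
 * mlx4_en_complete_rx_desc() can tell from the packet length how many
 * fragments were actually filled.
 */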
/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof(*context));
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		if (priv->dev->features & NETIF_F_RXFCS)
			ring->fcs_del = 0;
		else
			ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
				    MLX4_RESERVE_A0_QP);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
		rss_context->hash_fn = MLX4_RSS_HASH_XOR;
	} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
		memcpy(rss_context->rss_key, priv->rss_key,
		       MLX4_EN_RSS_KEY_SIZE);
	} else {
		en_err(priv, "Unknown RSS hash function requested\n");
		err = -EINVAL;
		goto indir_err;
	}
	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}