/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/busy_poll.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"
static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
			    struct mlx4_en_rx_alloc *page_alloc,
			    const struct mlx4_en_frag_info *frag_info,
			    gfp_t gfp)
{
	for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) {
		gfp |= __GFP_COMP | __GFP_NOWARN;
		page = alloc_pages(gfp, order);
		if (likely(page))
			break;
		if (--order < 0 ||
		    ((PAGE_SIZE << order) < frag_info->frag_size))
			return -ENOMEM;
	}

	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
			   PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(priv->ddev, dma)) {
		put_page(page);
		return -ENOMEM;
	}

	page_alloc->page_size = PAGE_SIZE << order;
	page_alloc->page = page;
	page_alloc->dma = dma;
	page_alloc->page_offset = 0;
	/* Not doing get_page() for each frag is a big win
	 * on asymmetric workloads. Note we can not use atomic_set().
	 */
	atomic_add(page_alloc->page_size / frag_info->frag_stride - 1,
		   &page->_count);
	return 0;
}
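/* Note on the reference-count bias taken in mlx4_alloc_pages() above
 * (illustrative numbers, not taken from the code): if page_size ends up
 * being 64KB and frag_stride is 2KB, the page can back 32 fragments, so
 * 31 extra references are added on top of the single reference returned
 * by alloc_pages(); each consumer of a fragment later drops exactly one
 * reference with put_page().
 */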
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       struct mlx4_en_rx_alloc *ring_alloc,
			       gfp_t gfp)
{
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	const struct mlx4_en_frag_info *frag_info;

	for (i = 0; i < priv->num_frags; i++) {
		frag_info = &priv->frag_info[i];
		page_alloc[i] = ring_alloc[i];
		page_alloc[i].page_offset += frag_info->frag_stride;

		if (page_alloc[i].page_offset + frag_info->frag_stride <=
		    ring_alloc[i].page_size)
			continue;

		if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
			goto out;
	}

	for (i = 0; i < priv->num_frags; i++) {
		frags[i] = ring_alloc[i];
		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
		ring_alloc[i] = page_alloc[i];
		rx_desc->data[i].addr = cpu_to_be64(dma);
	}

	return 0;

out:
	while (i--) {
		if (page_alloc[i].page != ring_alloc[i].page) {
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
				       page_alloc[i].page_size,
				       PCI_DMA_FROMDEVICE);
			page = page_alloc[i].page;
			atomic_set(&page->_count, 1);
			put_page(page);
		}
	}
	return -ENOMEM;
}
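/* mlx4_en_alloc_frags() above recycles pages: for each fragment it first
 * tries to advance page_offset by frag_stride within the page already held
 * in ring_alloc[], and only calls mlx4_alloc_pages() for a fresh page once
 * the remaining room is smaller than one stride.
 */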
static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frags,
			      int i)
{
	const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;

	if (next_frag_end > frags[i].page_size)
		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
			       PCI_DMA_FROMDEVICE);

	if (frags[i].page)
		put_page(frags[i].page);
}
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
				     frag_info, GFP_KERNEL | __GFP_COLD))
			goto out;

		en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
		       i, ring->page_alloc[i].page_size,
		       atomic_read(&ring->page_alloc[i].page->_count));
	}
	return 0;

out:
	while (i--) {
		page_alloc = &ring->page_alloc[i];
		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		page = page_alloc->page;
		atomic_set(&page->_count, 1);
		put_page(page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}
static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		while (page_alloc->page_offset + frag_info->frag_stride <
		       page_alloc->page_size) {
			put_page(page_alloc->page);
			page_alloc->page_offset += frag_info->frag_stride;
		}
		page_alloc->page = NULL;
	}
}
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key.
	 */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}
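/* Illustration of the padding above (assumed sizes, for clarity only): with
 * 16-byte scatter entries (DS_SIZE) and a stride that rounds up to 64 bytes,
 * possible_frags is 4, so a 3-fragment MTU configuration leaves one trailing
 * entry to be nulled out with the MLX4_EN_MEMTYPE_PAD key.
 */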
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					 (index << priv->log_rx_info);

	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}
static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
{
	return ring->prod == ring->cons;
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
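/* The producer index written above is published to hardware through the
 * doorbell record allocated in ring->wqres; only the low 16 bits are kept,
 * which presumes (reasonably, though not checked here) that a ring never
 * exceeds 64K entries.
 */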
static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags, nr);
	}
}
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL | __GFP_COLD)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	while (!mlx4_en_is_ring_empty(ring)) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
		++ring->cons;
	}
}
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs,
			      netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}
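/* Example of the ring-count selection above (assumed values): with 16 EQs
 * available on a port and netif_get_num_default_rss_queues() returning 8,
 * num_rx_rings is 8 and rounddown_pow_of_two() keeps it at 8; a value of 12
 * would instead be rounded down to 8. Under a low-memory profile the count
 * is pinned to MIN_RX_RINGS regardless.
 */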
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vmalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vmalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;
	return err;
}
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			if (ring->stride <= TXBB_SIZE)
				ring->buf -= TXBB_SIZE;
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}
/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_cq), which tries to allocate
 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
	if (!priv->port_up)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
			napi_reschedule(&priv->rx_cq[ring]->napi);
	}
}
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv);
#endif
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
	mlx4_en_destroy_allocator(priv, ring);
}
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;
		if (unlikely(!frags[nr].page))
			goto fail;

		dma = be64_to_cpu(rx_desc->data[nr].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
					DMA_FROM_DEVICE);

		/* Save page reference in skb */
		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
		skb_frags_rx[nr].page_offset = frags[nr].page_offset;
		skb->truesize += frag_info->frag_stride;
		frags[nr].page = NULL;
	}
	/* Adjust size of last fragment to match actual length */
	if (nr > 0)
		skb_frag_size_set(&skb_frags_rx[nr - 1],
				  length - priv->frag_info[nr - 1].frag_prefix_size);
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(&skb_frags_rx[nr]);
	}
	return 0;
}
*mlx4_en_rx_skb(struct mlx4_en_priv
*priv
,
584 struct mlx4_en_rx_desc
*rx_desc
,
585 struct mlx4_en_rx_alloc
*frags
,
593 skb
= netdev_alloc_skb(priv
->dev
, SMALL_PACKET_SIZE
+ NET_IP_ALIGN
);
595 en_dbg(RX_ERR
, priv
, "Failed allocating skb\n");
598 skb_reserve(skb
, NET_IP_ALIGN
);
601 /* Get pointer to first fragment so we could copy the headers into the
602 * (linear part of the) skb */
603 va
= page_address(frags
[0].page
) + frags
[0].page_offset
;
605 if (length
<= SMALL_PACKET_SIZE
) {
606 /* We are copying all relevant data to the skb - temporarily
607 * sync buffers for the copy */
608 dma
= be64_to_cpu(rx_desc
->data
[0].addr
);
609 dma_sync_single_for_cpu(priv
->ddev
, dma
, length
,
611 skb_copy_to_linear_data(skb
, va
, length
);
614 unsigned int pull_len
;
616 /* Move relevant fragments to skb */
617 used_frags
= mlx4_en_complete_rx_desc(priv
, rx_desc
, frags
,
619 if (unlikely(!used_frags
)) {
623 skb_shinfo(skb
)->nr_frags
= used_frags
;
625 pull_len
= eth_get_headlen(va
, SMALL_PACKET_SIZE
);
626 /* Copy headers into the skb linear buffer */
627 memcpy(skb
->data
, va
, pull_len
);
628 skb
->tail
+= pull_len
;
630 /* Skip headers in first fragment */
631 skb_shinfo(skb
)->frags
[0].page_offset
+= pull_len
;
633 /* Adjust size of first fragment */
634 skb_frag_size_sub(&skb_shinfo(skb
)->frags
[0], pull_len
);
635 skb
->data_len
= length
- pull_len
;
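/* Summary of mlx4_en_rx_skb() above: frames of at most SMALL_PACKET_SIZE are
 * copied wholesale into the skb linear area (the page fragments stay in the
 * ring), while larger frames keep their page fragments and only have the
 * link/IP headers, as sized by eth_get_headlen(), pulled into the linear
 * part.
 */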
static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
	int i;
	int offset = ETH_HLEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	priv->loopback_ok = 1;

out_loopback:
	dev_kfree_skb_any(skb);
}
static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	int index = ring->prod & ring->size_mask;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		if (mlx4_en_prepare_rx_desc(priv, ring, index,
					    GFP_ATOMIC | __GFP_COLD))
			break;
		ring->prod++;
		index = ring->prod & ring->size_mask;
	}
}
/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation.
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	return csum_add(hw_checksum, *(__wsum *)vlanh);
}
/* Although the stack expects checksum which doesn't include the pseudo
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
				struct iphdr *iph)
{
	__u16 length_for_csum = 0;
	__wsum csum_pseudo_header = 0;

	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						length_for_csum, iph->protocol, 0);
	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
}
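/* Worked example for the IPv4 fix-up above (illustrative numbers): for a
 * packet with tot_len 60 and a 20-byte header (ihl = 5), length_for_csum is
 * 60 - 20 = 40, and csum_tcpudp_nofold(saddr, daddr, 40, protocol, 0) is the
 * pseudo-header sum that gets folded back out of the hardware value before
 * it is stored in skb->csum.
 */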
#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, besides subtracting the pseudo header checksum,
 * we also compute/add the IP header checksum which
 * is not added by the HW.
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct ipv6hdr *ipv6h)
{
	__wsum csum_pseudo_hdr = 0;

	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
		return -1;
	hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));

	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));

	skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
	skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
	return 0;
}
#endif
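/* check_csum() below massages the CQE checksum into the CHECKSUM_COMPLETE
 * form the stack expects: it adds back a VLAN header that hardware did not
 * strip and then removes the pseudo-header contribution for IPv4 or IPv6.
 * A non-zero return makes the callers fall back to CHECKSUM_NONE.
 */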
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
{
	__wsum hw_checksum = 0;
	void *hdr = (u8 *)va + sizeof(struct ethhdr);

	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
		hdr += sizeof(struct vlan_hdr);
	}

	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
		if (get_fixed_ipv6_csum(hw_checksum, skb, hdr))
			return -1;
#endif
	return 0;
}
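/* mlx4_en_process_rx_cq() below is the NAPI receive path: it consumes up to
 * 'budget' completions, hands each frame to the stack either as GRO frags or
 * as a regular skb, and finally replenishes the ring and rings the doorbell.
 */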
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_alloc *frags;
	struct mlx4_en_rx_desc *rx_desc;
	int factor = priv->cqe_factor;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index'.
	 */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/* make sure we read the CQE after we read the ownership bit */
		dma_rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet if SRIOV is not enabled
		 * and not performing the selftest or flb disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			/* Get pointer to first fragment since we haven't
			 * skb yet and cast it to ethhdr struct
			 */
			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);
			ethh = (struct ethhdr *)(page_address(frags[0].page) +
						 frags[0].page_offset);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				rcu_read_lock();
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source)) {
						rcu_read_unlock();
						goto next;
					}
				}
				rcu_read_unlock();
			}
		}

		/* Packet is OK - process it. */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;
		ring->bytes += length;

		l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
			(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						      MLX4_CQE_STATUS_UDP)) {
				if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
				    cqe->checksum == cpu_to_be16(0xffff)) {
					ip_summed = CHECKSUM_UNNECESSARY;
				} else {
					ip_summed = CHECKSUM_NONE;
				}
			} else {
				if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
				    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
							       MLX4_CQE_STATUS_IPV6))) {
					ip_summed = CHECKSUM_COMPLETE;
					ring->csum_complete++;
				} else {
					ip_summed = CHECKSUM_NONE;
				}
			}
		} else {
			ip_summed = CHECKSUM_NONE;
		}

		/* This packet is eligible for GRO if it is:
		 * - DIX Ethernet (type interpretation)
		 * - TCP/IP (v4)
		 * - without IP options
		 * - not an IP fragment
		 * - no LLS polling in progress
		 */
		if (!mlx4_en_cq_busy_polling(cq) &&
		    (dev->features & NETIF_F_GRO)) {
			struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
			if (!gro_skb)
				goto next;

			nr = mlx4_en_complete_rx_desc(priv,
						      rx_desc, frags, gro_skb,
						      length);
			if (!nr)
				goto next;

			if (ip_summed == CHECKSUM_COMPLETE) {
				void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
				if (check_csum(cqe, gro_skb, va,
					       dev->features)) {
					ip_summed = CHECKSUM_NONE;
					ring->csum_complete--;
				}
			}

			skb_shinfo(gro_skb)->nr_frags = nr;
			gro_skb->len = length;
			gro_skb->data_len = length;
			gro_skb->ip_summed = ip_summed;

			if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
				gro_skb->csum_level = 1;

			if ((cqe->vlan_my_qpn &
			     cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
			    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
				u16 vid = be16_to_cpu(cqe->sl_vid);

				__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
			} else if ((be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_SVLAN_PRESENT_MASK) &&
				   (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
				__vlan_hwaccel_put_tag(gro_skb,
						       htons(ETH_P_8021AD),
						       be16_to_cpu(cqe->sl_vid));
			}

			if (dev->features & NETIF_F_RXHASH)
				skb_set_hash(gro_skb,
					     be32_to_cpu(cqe->immed_rss_invalid),
					     (ip_summed == CHECKSUM_UNNECESSARY) ?
						PKT_HASH_TYPE_L4 :
						PKT_HASH_TYPE_L3);

			skb_record_rx_queue(gro_skb, cq->ring);
			skb_mark_napi_id(gro_skb, &cq->napi);

			if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
				timestamp = mlx4_en_get_cqe_ts(cqe);
				mlx4_en_fill_hwtstamps(mdev,
						       skb_hwtstamps(gro_skb),
						       timestamp);
			}

			napi_gro_frags(&cq->napi);
			goto next;
		}

		/* GRO not possible, complete processing here */
		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		if (ip_summed == CHECKSUM_COMPLETE) {
			if (check_csum(cqe, skb, skb->data, dev->features)) {
				ip_summed = CHECKSUM_NONE;
				ring->csum_complete--;
			}
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
			skb->csum_level = 1;

		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     (ip_summed == CHECKSUM_UNNECESSARY) ?
					PKT_HASH_TYPE_L4 :
					PKT_HASH_TYPE_L3);

		if ((be32_to_cpu(cqe->vlan_my_qpn) &
		     MLX4_CQE_CVLAN_PRESENT_MASK) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
		else if ((be32_to_cpu(cqe->vlan_my_qpn) &
			  MLX4_CQE_SVLAN_PRESENT_MASK) &&
			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
					       be16_to_cpu(cqe->sl_vid));

		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
			timestamp = mlx4_en_get_cqe_ts(cqe);
			mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
					       timestamp);
		}

		skb_mark_napi_id(skb, &cq->napi);

		if (!mlx4_en_cq_busy_polling(cq))
			napi_gro_receive(&cq->napi, skb);
		else
			netif_receive_skb(skb);

next:
		for (nr = 0; nr < priv->num_frags; nr++)
			mlx4_en_free_frag(priv, frags, nr);

		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (++polled == budget)
			goto out;
	}

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	mlx4_en_refill_rx_buffers(priv, ring);
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}
/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	if (!mlx4_en_cq_lock_napi(cq))
		return budget;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	mlx4_en_cq_unlock_napi(cq);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget) {
		int cpu_curr;
		const struct cpumask *aff;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;

		if (likely(cpumask_test_cpu(cpu_curr, aff)))
			return budget;

		/* Current cpu is not according to smp_irq_affinity -
		 * probably affinity changed. need to stop this NAPI
		 * poll, and restart it on the right CPU.
		 */
		done = 0;
	}
	napi_complete_done(napi, done);
	mlx4_en_arm_cq(priv, cq);
	return done;
}
static const int frag_sizes[] = {
	FRAG_SZ0,
	FRAG_SZ1,
	FRAG_SZ2,
	FRAG_SZ3
};

void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	/* VLAN_HLEN is added twice, to support skb vlan tagged with multiple
	 * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
	 */
	int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
	int buf_size = 0;
	int i = 0;

	while (buf_size < eff_mtu) {
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		priv->frag_info[i].frag_stride =
			ALIGN(priv->frag_info[i].frag_size,
			      SMP_CACHE_BYTES);
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_err(priv,
		       "  frag:%d - size:%d prefix:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_prefix_size,
		       priv->frag_info[i].frag_stride);
	}
}
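/* Illustration of the layout computed above (assuming the first frag_sizes[]
 * entry is large enough): with a standard 1500-byte MTU, eff_mtu is
 * 1500 + 14 + 8 = 1522, so the loop finishes after one pass with
 * num_frags = 1, frag_size = 1522, frag_prefix_size = 0 and frag_stride
 * aligned up to the cache line (1536 with 64-byte lines). A jumbo MTU is
 * simply spread over several fragments, each frag_prefix_size recording how
 * many bytes precede that fragment.
 */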
/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof(*context));
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		if (priv->dev->features & NETIF_F_RXFCS)
			ring->fcs_del = 0;
		else
			ring->fcs_del = ETH_FCS_LEN;
	} else {
		ring->fcs_del = 0;
	}

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
				    MLX4_RESERVE_A0_QP);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}
void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
		rss_context->hash_fn = MLX4_RSS_HASH_XOR;
	} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
		memcpy(rss_context->rss_key, priv->rss_key,
		       MLX4_EN_RSS_KEY_SIZE);
		netdev_rss_key_fill(rss_context->rss_key,
				    MLX4_EN_RSS_KEY_SIZE);
	} else {
		en_err(priv, "Unknown RSS hash function requested\n");
		err = -EINVAL;
		goto indir_err;
	}

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}
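/* Note on the RSS context programmed above: the number of rings is encoded
 * as log2 in the top byte of base_qpn, next to the base QP number itself.
 * For example (values assumed), eight rings starting at QP 0x200 would be
 * written as (3 << 24) | 0x200.
 */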
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}