/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/busy_poll.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#include "mlx4_en.h"
static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
			    struct mlx4_en_rx_alloc *page_alloc,
			    const struct mlx4_en_frag_info *frag_info,
			    gfp_t _gfp)
{
	int order;
	struct page *page;
	dma_addr_t dma;

	for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) {
		gfp_t gfp = _gfp;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN;
		page = alloc_pages(gfp, order);
		if (likely(page))
			break;
		if (--order < 0 ||
		    ((PAGE_SIZE << order) < frag_info->frag_size))
			return -ENOMEM;
	}
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
			   PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(priv->ddev, dma)) {
		put_page(page);
		return -ENOMEM;
	}
	page_alloc->page_size = PAGE_SIZE << order;
	page_alloc->page = page;
	page_alloc->dma = dma;
	page_alloc->page_offset = frag_info->frag_align;
	/* Not doing get_page() for each frag is a big win
	 * on asymmetric workloads. Note we can not use atomic_set().
	 */
	atomic_add(page_alloc->page_size / frag_info->frag_stride - 1,
		   &page->_count);
	return 0;
}
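
/* Illustrative note (added commentary, not from the original driver
 * source): assuming 4 KiB pages, order 2 and a frag_stride of 2 KiB,
 * page_size / frag_stride is 8, so the atomic_add() above raises the
 * page refcount by 7. Together with the single reference returned by
 * alloc_pages() that is one reference per fragment carved out of the
 * page, which avoids a get_page() call per fragment on the RX fast path.
 */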
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       struct mlx4_en_rx_alloc *ring_alloc,
			       gfp_t gfp)
{
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	const struct mlx4_en_frag_info *frag_info;
	struct page *page;
	dma_addr_t dma;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		frag_info = &priv->frag_info[i];
		page_alloc[i] = ring_alloc[i];
		page_alloc[i].page_offset += frag_info->frag_stride;

		if (page_alloc[i].page_offset + frag_info->frag_stride <=
		    ring_alloc[i].page_size)
			continue;

		if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
			goto out;
	}

	for (i = 0; i < priv->num_frags; i++) {
		frags[i] = ring_alloc[i];
		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
		ring_alloc[i] = page_alloc[i];
		rx_desc->data[i].addr = cpu_to_be64(dma);
	}

	return 0;

out:
	while (i--) {
		frag_info = &priv->frag_info[i];
		if (page_alloc[i].page != ring_alloc[i].page) {
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
			page = page_alloc[i].page;
			atomic_set(&page->_count, 1);
			put_page(page);
		}
	}
	return -ENOMEM;
}
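
/* Illustrative note (added commentary, not from the original driver
 * source): each fragment slot normally reuses the page cached in
 * ring_alloc[] by advancing page_offset by frag_stride; a new page is
 * allocated only when the next stride would run past page_size. The
 * error path restores the refcount of any page allocated in this call
 * and releases it, leaving the per-ring allocator state untouched.
 */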
static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frags,
			      int i)
{
	const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;

	if (next_frag_end > frags[i].page_size)
		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
			       PCI_DMA_FROMDEVICE);

	if (frags[i].page)
		put_page(frags[i].page);
}
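
/* Illustrative note (added commentary, not from the original driver
 * source): the DMA mapping is torn down only when this fragment is the
 * last stride that fits in its page (offset + 2 * stride would exceed
 * page_size); earlier strides merely drop a page reference because the
 * same mapping is still used by later fragments from that page.
 */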
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	int i;
	struct mlx4_en_rx_alloc *page_alloc;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
				     frag_info, GFP_KERNEL))
			goto out;
	}
	return 0;

out:
	while (i--) {
		struct page *page;

		page_alloc = &ring->page_alloc[i];
		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		page = page_alloc->page;
		atomic_set(&page->_count, 1);
		put_page(page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}
static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		while (page_alloc->page_offset + frag_info->frag_stride <
		       page_alloc->page_size) {
			put_page(page_alloc->page);
			page_alloc->page_offset += frag_info->frag_stride;
		}
		page_alloc->page = NULL;
	}
}
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}
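
/* Illustrative note (added commentary, not from the original driver
 * source): each scatter entry (DS_SIZE, one struct mlx4_wqe_data_seg)
 * is 16 bytes, so e.g. a 64-byte stride leaves room for up to four
 * entries; entries beyond priv->num_frags get a zero byte_count/addr
 * and the MLX4_EN_MEMTYPE_PAD key so the HW treats them as no-ops.
 */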
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					 (index << priv->log_rx_info);

	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
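
/* Illustrative note (added commentary, not from the original driver
 * source): ring->prod is a free-running 32-bit counter, but only its
 * low 16 bits are exposed to the HW through the doorbell record, hence
 * the 0xffff mask before the big-endian store.
 */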
static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags, nr);
	}
}
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
		++ring->cons;
	}
}
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		if (!dev->caps.comp_pool)
			num_of_eqs = max_t(int, MIN_RX_RINGS,
					   min_t(int,
						 dev->caps.num_comp_vectors,
						 DEF_RX_RINGS));
		else
			num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
					   dev->caps.comp_pool /
					   dev->caps.num_ports) - 1;

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs,
			      netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}
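
/* Illustrative note (added commentary, not from the original driver
 * source): on a port with, say, 12 completion vectors and a default
 * RSS queue count of 8, num_rx_rings is capped at 8 and
 * rounddown_pow_of_two() keeps it at 8; with only 6 vectors the result
 * would be rounded down from 6 to 4 rings. Low-memory profiles fall
 * back to MIN_RX_RINGS.
 */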
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vmalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vmalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);
	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;
	return err;
}
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			if (ring->stride <= TXBB_SIZE)
				ring->buf -= TXBB_SIZE;
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv);
#endif
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
	mlx4_en_destroy_allocator(priv, ring);
}
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;
		if (!frags[nr].page)
			goto fail;

		dma = be64_to_cpu(rx_desc->data[nr].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
					DMA_FROM_DEVICE);

		/* Save page reference in skb */
		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
		skb_frags_rx[nr].page_offset = frags[nr].page_offset;
		skb->truesize += frag_info->frag_stride;
		frags[nr].page = NULL;
	}
	/* Adjust size of last fragment to match actual length */
	if (nr > 0)
		skb_frag_size_set(&skb_frags_rx[nr - 1],
			length - priv->frag_info[nr - 1].frag_prefix_size);
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(&skb_frags_rx[nr]);
	}
	return 0;
}
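
/* Illustrative note (added commentary, not from the original driver
 * source): frag_prefix_size is the number of packet bytes covered by
 * all preceding fragments, so a packet shorter than that never touches
 * the fragment; the last used fragment is then trimmed so the sum of
 * skb fragment sizes equals the wire length reported by the CQE.
 */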
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct mlx4_en_rx_alloc *frags,
				      unsigned int length)
{
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(frags[0].page) + frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		skb->tail += length;
	} else {
		unsigned int pull_len;

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
						      skb, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, pull_len);
		skb->tail += pull_len;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += pull_len;

		/* Adjust size of first fragment */
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
		skb->data_len = length - pull_len;
	}
	return skb;
}
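
/* Illustrative note (added commentary, not from the original driver
 * source): packets up to SMALL_PACKET_SIZE are copied wholesale into
 * the skb linear area so the backing page reference can be dropped
 * right away; larger packets keep their payload in page fragments and
 * only the protocol headers (as sized by eth_get_headlen()) are pulled
 * into the linear area for the stack.
 */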
static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
	int i;
	int offset = ETH_HLEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	dev_kfree_skb_any(skb);
}

static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	int index = ring->prod & ring->size_mask;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC))
			break;
		ring->prod++;
		index = ring->prod & ring->size_mask;
	}
}

/* Illustrative note (added commentary, not from the original driver
 * source): prod and cons are free-running counters, so (u32)(prod - cons)
 * is the number of descriptors currently posted even across wrap-around;
 * refilling stops once the ring again holds actual_size buffers or an
 * atomic page allocation fails, to be retried on the next NAPI poll.
 */

int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_alloc *frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;
	int factor = priv->cqe_factor;
	u64 timestamp;
	bool l2_tunnel;

	if (!priv->port_up)
		return 0;

	if (budget <= 0)
		return polled;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet if SRIOV is not enabled
		 * and not performing the selftest or flb disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			struct ethhdr *ethh;
			dma_addr_t dma;
			/* Get pointer to first fragment since we haven't
			 * skb yet and cast it to ethhdr struct
			 */
			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);
			ethh = (struct ethhdr *)(page_address(frags[0].page) +
						 frags[0].page_offset);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				rcu_read_lock();
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source)) {
						rcu_read_unlock();
						goto next;
					}
				}
				rcu_read_unlock();
			}
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;
		ring->bytes += length;
		ring->packets++;
		l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
			(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				ring->csum_ok++;
				/* This packet is eligible for GRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment
				 * - no LLS polling in progress
				 */
				if (!mlx4_en_cq_busy_polling(cq) &&
				    (dev->features & NETIF_F_GRO)) {
					struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
					if (!gro_skb)
						goto next;

					nr = mlx4_en_complete_rx_desc(priv,
						rx_desc, frags, gro_skb,
						length);
					if (!nr)
						goto next;

					skb_shinfo(gro_skb)->nr_frags = nr;
					gro_skb->len = length;
					gro_skb->data_len = length;
					gro_skb->ip_summed = CHECKSUM_UNNECESSARY;

					if (l2_tunnel)
						gro_skb->csum_level = 1;
					if ((cqe->vlan_my_qpn &
					    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
					    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
						u16 vid = be16_to_cpu(cqe->sl_vid);

						__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
					}

					if (dev->features & NETIF_F_RXHASH)
						skb_set_hash(gro_skb,
							     be32_to_cpu(cqe->immed_rss_invalid),
							     PKT_HASH_TYPE_L3);

					skb_record_rx_queue(gro_skb, cq->ring);
					skb_mark_napi_id(gro_skb, &cq->napi);

					if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
						timestamp = mlx4_en_get_cqe_ts(cqe);
						mlx4_en_fill_hwtstamps(mdev,
								       skb_hwtstamps(gro_skb),
								       timestamp);
					}

					napi_gro_frags(&cq->napi);
					goto next;
				}

				/* GRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				ip_summed = CHECKSUM_NONE;
				ring->csum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			ring->csum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
			skb->csum_level = 1;

		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     PKT_HASH_TYPE_L3);

		if ((be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));

		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
			timestamp = mlx4_en_get_cqe_ts(cqe);
			mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
					       timestamp);
		}

		skb_mark_napi_id(skb, &cq->napi);

		if (!mlx4_en_cq_busy_polling(cq))
			napi_gro_receive(&cq->napi, skb);
		else
			netif_receive_skb(skb);

next:
		for (nr = 0; nr < priv->num_frags; nr++)
			mlx4_en_free_frag(priv, frags, nr);

		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (++polled == budget)
			goto out;
	}

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	mlx4_en_refill_rx_buffers(priv, ring);
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
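
/* Illustrative note (added commentary, not from the original driver
 * source): the XNOR ownership test in the completion loop above works
 * because the HW flips the CQE ownership bit on every pass over the CQ
 * ring while (cons_index & cq->size) flips at the same rate on the SW
 * side; a CQE belongs to SW exactly when the two agree, so no "valid"
 * flag has to be cleared after an entry is consumed.
 */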
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (priv->port_up)
		napi_schedule(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}
/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	if (!mlx4_en_cq_lock_napi(cq))
		return budget;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	mlx4_en_cq_unlock_napi(cq);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget) {
		int cpu_curr;
		const struct cpumask *aff;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;

		if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
			/* Current cpu is not according to smp_irq_affinity -
			 * probably affinity changed. need to stop this NAPI
			 * poll, and restart it on the right CPU
			 */
			napi_complete(napi);
			mlx4_en_arm_cq(priv, cq);
			return 0;
		}
	} else {
		/* Done for now */
		napi_complete(napi);
		mlx4_en_arm_cq(priv, cq);
	}
	return done;
}
static const int frag_sizes[] = {
	FRAG_SZ0,
	FRAG_SZ1,
	FRAG_SZ2,
	FRAG_SZ3
};
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
	int buf_size = 0;
	int i = 0;

	while (buf_size < eff_mtu) {
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		if (!i)	{
			priv->frag_info[i].frag_align = NET_IP_ALIGN;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
		} else {
			priv->frag_info[i].frag_align = 0;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
		}
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_err(priv,
		       "  frag:%d - size:%d prefix:%d align:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_prefix_size,
		       priv->frag_info[i].frag_align,
		       priv->frag_info[i].frag_stride);
	}
}
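
/* Illustrative note (added commentary, not from the original driver
 * source): for a standard 1500-byte MTU the effective size is
 * 1500 + 14 (Ethernet header) + 4 (VLAN) = 1518 bytes, which typically
 * fits in the first entry of frag_sizes[], so num_frags ends up as 1;
 * a 9000-byte jumbo MTU spills across several frag_sizes[] entries,
 * each later fragment starting at the frag_prefix_size accumulated
 * from the ones before it.
 */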
/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;
	static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
				0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
				0x593D56D9, 0xF3253C06, 0x2ADC1FFC};

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}