/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#define UPLINK_VPORT 0xFFFF

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(dev, format, ...)				\
	pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_warn(dev, format, ...)				\
	pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
/* E-Switch UC L2 table hash node */
struct esw_uc_addr {
	struct l2addr_node node;
	u32                table_index;
	u32                vport;
};
/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};
/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u32                    vport;
	struct mlx5_flow_rule *flow_rule; /* SRIOV only */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};
enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
	PROMISC_CHANGE = BIT(3),
};
/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE | \
			    PROMISC_CHANGE)
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)];
	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int err;

	memset(out, 0, sizeof(out));
	memset(in, 0, sizeof(in));

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	return mlx5_cmd_status_to_err_v2(out);
}
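/* Usage sketch (illustrative, mirrors how this file calls it): re-arm change
 * events for VF vport 1, listening only for unicast address list changes and
 * promiscuous mode flips:
 *
 *	err = arm_vport_context_events_cmd(dev, 1,
 *					   UC_ADDR_CHANGE | PROMISC_CHANGE);
 *
 * The change handler re-arms after each processed event; see
 * esw_vport_change_handle_locked() below.
 */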
/* E-Switch vport context HW commands */
static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
				       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)];

	memset(in, 0, sizeof(in));

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);

	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
}
static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				 u16 *vlan, u8 *qos)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)];
	int err;
	bool cvlan_strip;
	bool cvlan_insert;

	memset(out, 0, sizeof(out));

	*vlan = 0;
	*qos = 0;

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -ENOTSUPP;

	err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out));
	if (err)
		goto out;

	cvlan_strip = MLX5_GET(query_esw_vport_context_out, out,
			       esw_vport_context.vport_cvlan_strip);

	cvlan_insert = MLX5_GET(query_esw_vport_context_out, out,
				esw_vport_context.vport_cvlan_insert);

	if (cvlan_strip || cvlan_insert) {
		*vlan = MLX5_GET(query_esw_vport_context_out, out,
				 esw_vport_context.cvlan_id);
		*qos = MLX5_GET(query_esw_vport_context_out, out,
				esw_vport_context.cvlan_pcp);
	}

	esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n",
		  vport, *vlan, *qos);
out:
	return err;
}
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];

	memset(out, 0, sizeof(out));

	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);

	return mlx5_cmd_exec_check_status(dev, in, inlen,
					  out, sizeof(out));
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				  u16 vlan, u8 qos, bool set)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];

	memset(in, 0, sizeof(in));

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -ENOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
		  vport, vlan, qos, set);

	if (set) {
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}
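/* Usage sketch (illustrative): push VST vlan 10, priority 3 onto vport 2,
 * then clear it again. Clearing passes set=false, so only the field_select
 * bits are written and both strip and insert end up disabled:
 *
 *	err = modify_esw_vport_cvlan(dev, 2, 10, 3, true);
 *	...
 *	err = modify_esw_vport_cvlan(dev, 2, 0, 0, false);
 */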
/* HW L2 Table (MPFS) management */
static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
				  u8 *mac, u8 vlan_valid, u16 vlan)
{
	u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)];
	u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)];
	u8 *in_mac_addr;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(set_l2_table_entry_in, in, opcode,
		 MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
	MLX5_SET(set_l2_table_entry_in, in, table_index, index);
	MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid);
	MLX5_SET(set_l2_table_entry_in, in, vlan, vlan);

	in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
	ether_addr_copy(&in_mac_addr[2], mac);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}
static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
{
	u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)];
	u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(delete_l2_table_entry_in, in, opcode,
		 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
	MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}
static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
{
	int err = 0;

	*ix = find_first_zero_bit(l2_table->bitmap, l2_table->size);
	if (*ix >= l2_table->size)
		err = -ENOSPC;
	else
		__set_bit(*ix, l2_table->bitmap);

	return err;
}
static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix)
{
	__clear_bit(ix, l2_table->bitmap);
}
static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac,
			      u8 vlan_valid, u16 vlan,
			      u32 *index)
{
	struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
	int err;

	err = alloc_l2_table_index(l2_table, index);
	if (err)
		return err;

	err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan);
	if (err)
		free_l2_table_index(l2_table, *index);

	return err;
}
static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
{
	struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;

	del_l2_table_entry_cmd(dev, index);
	free_l2_table_index(l2_table, index);
}
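/* Usage sketch (illustrative): the L2 (MPFS) helpers pair a bitmap index
 * allocation with the HW command, so a unicast MAC is typically programmed
 * and torn down like this:
 *
 *	u32 index;
 *
 *	err = set_l2_table_entry(dev, mac, 0, 0, &index);
 *	if (!err)
 *		del_l2_table_entry(dev, index);
 */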
/* E-Switch FDB */
static struct mlx5_flow_rule *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_rule *flow_rule = NULL;
	struct mlx5_flow_destination dest;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;
	u32 *match_v;
	u32 *match_c;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;
	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_v || !match_c) {
		pr_warn("FDB: Failed to alloc match parameters\n");
		goto out;
	}

	dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);

	flow_rule =
		mlx5_add_flow_rule(esw->fdb_table.fdb,
				   match_header,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				   0, &dest);
	if (IS_ERR_OR_NULL(flow_rule)) {
		pr_warn(
			"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}
out:
	kfree(match_v);
	kfree(match_c);
	return flow_rule;
}
static struct mlx5_flow_rule *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}
static struct mlx5_flow_rule *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}
static struct mlx5_flow_rule *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
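/* The three wrappers above only differ in the DMAC mask/value and rx_rule
 * flag they feed __esw_fdb_set_vport_rule() (illustrative summary):
 *	exact UC/MC forward: mask ff:ff:ff:ff:ff:ff, value = mac
 *	allmulti:            mask 01:00:00:00:00:00, value 01:00:00:00:00:00
 *	                     (multicast bit only)
 *	promisc:             empty DMAC match, rx_rule=true so only the misc
 *	                     source_port (uplink) criteria apply
 */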
static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -ENOMEM;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;
	memset(flow_group_in, 0, inlen);

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
	fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
	if (IS_ERR_OR_NULL(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for allmulti and promisc rules*/
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.allmulti_grp = g;

	/* Promiscuous group :
	 * One rule that forward all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.promisc_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
			esw->fdb_table.allmulti_grp = NULL;
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
			esw->fdb_table.addr_grp = NULL;
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
			mlx5_destroy_flow_table(esw->fdb_table.fdb);
			esw->fdb_table.fdb = NULL;
		}
	}

	kvfree(flow_group_in); /* mlx5_vzalloc() may fall back to vmalloc */
	return err;
}
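/* FDB layout sketch (illustrative, with table_size entries derived from
 * log_max_ft_size above):
 *	[0 .. table_size - 3]  exact-match UC/MC addresses group
 *	[table_size - 2]       allmulti group (multicast-bit match only)
 *	[table_size - 1]       promisc group (misc source_port match only)
 */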
static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy FDB Table\n");
	mlx5_destroy_flow_group(esw->fdb_table.promisc_grp);
	mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
	mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
	esw->fdb_table.addr_grp = NULL;
	esw->fdb_table.allmulti_grp = NULL;
	esw->fdb_table.promisc_grp = NULL;
}
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (esw_uc) {
		esw_warn(esw->dev,
			 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
			 mac, vport, esw_uc->vport);
		return -EEXIST;
	}

	esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
	if (!esw_uc)
		return -ENOMEM;
	esw_uc->vport = vport;

	err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index);
	if (err)
		goto abort;

	if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);
	return err;
abort:
	l2addr_hash_del(esw_uc);
	return err;
}
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (!esw_uc || esw_uc->vport != vport) {
		esw_debug(esw->dev,
			  "MAC(%pM) doesn't belong to vport (%d)\n",
			  mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);

	del_l2_table_entry(esw->dev, esw_uc->table_index);

	if (vaddr->flow_rule)
		mlx5_del_flow_rule(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	l2addr_hash_del(esw_uc);
	return 0;
}
static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	u32 vport_idx = 0;

	for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
		struct mlx5_vport *vport = &esw->vports[vport_idx];
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_idx)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_idx);
				continue;
			}
			iter_vaddr->vport = vport_idx;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_idx);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rule(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}
static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rule(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rule(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}
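/* Bookkeeping sketch (illustrative): each vport_addr moves through a small
 * action state machine:
 *	MLX5_ACTION_ADD  -> programmed to HW above, then reset to
 *	                    MLX5_ACTION_NONE
 *	MLX5_ACTION_DEL  -> removed from HW and dropped from the hash
 * The update function below first marks every cached entry DEL, then flips
 * entries still reported by the vport context back to NONE (kept) or adds
 * new ones as ADD.
 */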
/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport_num, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it's now converted to be an
			 * original vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}
/* Sync vport mc promisc (allmulti) list from the eswitch mcast table.
 * Must be called after esw_update_vport_addr_list
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}
/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
	struct mlx5_vport *vport = &esw->vports[vport_num];

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport_num);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								UPLINK_VPORT);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rule(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rule(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule =
			esw_fdb_set_vport_promisc_rule(esw, vport_num);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rule(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}
/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport_num,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport_num, promisc_all, promisc_mc);

	if (!vport->trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
				(promisc_all || promisc_mc));
}
static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_MC);
	}

	if (vport->enabled_events & PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport->vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport->vport);
	}

	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_MC);
	}

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}
static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}
static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
	/* The egress acl table contains 2 rules:
	 * 1)Allow traffic with vlan_tag=vst_vlan_id
	 * 2)Drop all other traffic.
	 */
	int table_size = 2;
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
	    !IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR_OR_NULL(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kvfree(flow_group_in); /* allocated with mlx5_vzalloc() */
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
}
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
		mlx5_del_flow_rule(vport->egress.allowed_vlan);

	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
		mlx5_del_flow_rule(vport->egress.drop_rule);

	vport->egress.allowed_vlan = NULL;
	vport->egress.drop_rule = NULL;
}
static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}
static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	/* The ingress acl table contains 4 groups
	 * (2 active rules at the same time -
	 *      1 allow rule from one of the first 3 groups.
	 *      1 drop rule from the last group):
	 * 1)Allow untagged traffic with smac=original mac.
	 * 2)Allow untagged traffic.
	 * 3)Allow traffic with smac=original mac.
	 * 4)Drop all other traffic.
	 */
	int table_size = 4;
	int err = 0;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
	    !IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR_OR_NULL(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.acl = acl;

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_spoofchk_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_spoofchk_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.drop_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_spoofchk_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_untagged_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_untagged_spoofchk_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.acl))
			mlx5_destroy_flow_table(vport->ingress.acl);
		/* don't leave dangling pointers behind on failure */
		vport->ingress.acl = NULL;
		vport->ingress.allow_untagged_spoofchk_grp = NULL;
		vport->ingress.allow_untagged_only_grp = NULL;
		vport->ingress.allow_spoofchk_only_grp = NULL;
	}

	kvfree(flow_group_in); /* allocated with mlx5_vzalloc() */
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
		mlx5_del_flow_rule(vport->ingress.drop_rule);

	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
		mlx5_del_flow_rule(vport->ingress.allow_rule);

	vport->ingress.drop_rule = NULL;
	vport->ingress.allow_rule = NULL;
}
static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
	vport->ingress.allow_spoofchk_only_grp = NULL;
	vport->ingress.allow_untagged_only_grp = NULL;
	vport->ingress.allow_untagged_spoofchk_grp = NULL;
}
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	u8 smac[ETH_ALEN];
	u32 *match_v;
	u32 *match_c;
	int err = 0;
	u8 *smac_v;

	if (vport->spoofchk) {
		err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
		if (err) {
			esw_warn(esw->dev,
				 "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
				 vport->vport, err);
			return err;
		}

		if (!is_valid_ether_addr(smac)) {
			mlx5_core_warn(esw->dev,
				       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
				       vport->vport);
			return -EPERM;
		}
	}

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (!vport->vlan && !vport->qos && !vport->spoofchk) {
		esw_vport_disable_ingress_acl(esw, vport);
		return 0;
	}

	esw_vport_enable_ingress_acl(esw, vport);

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->vlan, vport->qos);

	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_v || !match_c) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	if (vport->vlan || vport->qos)
		MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);

	if (vport->spoofchk) {
		MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0);
		smac_v = MLX5_ADDR_OF(fte_match_param,
				      match_v,
				      outer_headers.smac_47_16);
		ether_addr_copy(smac_v, smac);
	}

	vport->ingress.allow_rule =
		mlx5_add_flow_rule(vport->ingress.acl,
				   MLX5_MATCH_OUTER_HEADERS,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
				   0, NULL);
	if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
			vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

	memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	vport->ingress.drop_rule =
		mlx5_add_flow_rule(vport->ingress.acl,
				   0,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_DROP,
				   0, NULL);
	if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
		err = PTR_ERR(vport->ingress.drop_rule);
		pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
			vport->vport, err);
		vport->ingress.drop_rule = NULL;
	}
out:
	if (err)
		esw_vport_cleanup_ingress_rules(esw, vport);

	kfree(match_v);
	kfree(match_c);
	return err;
}
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	u32 *match_v;
	u32 *match_c;
	int err = 0;

	esw_vport_cleanup_egress_rules(esw, vport);

	if (!vport->vlan && !vport->qos) {
		esw_vport_disable_egress_acl(esw, vport);
		return 0;
	}

	esw_vport_enable_egress_acl(esw, vport);

	esw_debug(esw->dev,
		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->vlan, vport->qos);

	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_v || !match_c) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Allowed vlan rule */
	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
	MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);

	vport->egress.allowed_vlan =
		mlx5_add_flow_rule(vport->egress.acl,
				   MLX5_MATCH_OUTER_HEADERS,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
				   0, NULL);
	if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
		err = PTR_ERR(vport->egress.allowed_vlan);
		pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
			vport->vport, err);
		vport->egress.allowed_vlan = NULL;
		goto out;
	}

	/* Drop others rule (star rule) */
	memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	vport->egress.drop_rule =
		mlx5_add_flow_rule(vport->egress.acl,
				   0,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_DROP,
				   0, NULL);
	if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
		err = PTR_ERR(vport->egress.drop_rule);
		pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
			vport->vport, err);
		vport->egress.drop_rule = NULL;
	}
out:
	kfree(match_v);
	kfree(match_c);
	return err;
}
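/* VST configuration sketch (illustrative): a vlan/qos setting for a VF takes
 * effect through three cooperating layers:
 *	modify_esw_vport_cvlan()   - HW strip/insert of the C-VLAN tag
 *	esw_vport_ingress_config() - drop tagged traffic arriving from the VF
 *	esw_vport_egress_config()  - allow only the VST vlan toward the VF
 */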
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
			     int enable_events)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
		esw_vport_ingress_config(esw, vport);
		esw_vport_egress_config(esw, vport);
	}

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      MLX5_ESW_VPORT_ADMIN_STATE_AUTO);

	/* Sync with current vport context */
	vport->enabled_events = enable_events;
	esw_vport_change_handle_locked(vport);

	vport->enabled = true;

	/* only PF is trusted by default */
	vport->trusted = (vport_num) ? false : true;

	arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
	mutex_unlock(&esw->state_lock);
}
static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];

	if (!vport->enabled)
		return;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;

	synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
	/* Wait for current already scheduled events to complete */
	flush_workqueue(esw->work_queue);
	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
	mutex_lock(&esw->state_lock);
	/* We don't assume VFs will cleanup after themselves.
	 * Calling vport change handler while vport is disabled will cleanup
	 * the vport resources.
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	if (vport_num) {
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}
	esw->enabled_vports--;
	mutex_unlock(&esw->state_lock);
}
/* Public E-Switch API */
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
{
	int err;
	int i;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
		return -ENOTSUPP;
	}

	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");

	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");

	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);

	esw_disable_vport(esw, 0);

	err = esw_create_fdb_table(esw, nvfs + 1);
	if (err)
		goto abort;

	for (i = 0; i <= nvfs; i++)
		esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS);

	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
		 esw->enabled_vports);
	return 0;

abort:
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
	return err;
}
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
	struct esw_mc_addr *mc_promisc;
	int i;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
		 esw->enabled_vports);

	mc_promisc = esw->mc_promisc;

	for (i = 0; i < esw->total_vports; i++)
		esw_disable_vport(esw, i);

	if (mc_promisc && mc_promisc->uplink_rule)
		mlx5_del_flow_rule(mc_promisc->uplink_rule);

	esw_destroy_fdb_table(esw);

	/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}
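/* Usage sketch (illustrative): the SRIOV enable/disable flow in the core
 * driver would call roughly:
 *
 *	err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs);
 *	...
 *	mlx5_eswitch_disable_sriov(dev->priv.eswitch);
 *
 * Note that vport 0 (PF) is re-enabled with UC_ADDR_CHANGE only, both on
 * disable and on the enable failure path.
 */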
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
	int total_vports = MLX5_TOTAL_VPORTS(dev);
	struct esw_mc_addr *mc_promisc;
	struct mlx5_eswitch *esw;
	int vport_num;
	int err;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	esw_info(dev,
		 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
		 total_vports, l2_table_size,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;

	esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
				       sizeof(uintptr_t), GFP_KERNEL);
	if (!esw->l2_table.bitmap) {
		err = -ENOMEM;
		goto abort;
	}
	esw->l2_table.size = l2_table_size;

	mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
	if (!mc_promisc) {
		err = -ENOMEM;
		goto abort;
	}
	esw->mc_promisc = mc_promisc;

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
			      GFP_KERNEL);
	if (!esw->vports) {
		err = -ENOMEM;
		goto abort;
	}

	mutex_init(&esw->state_lock);

	for (vport_num = 0; vport_num < total_vports; vport_num++) {
		struct mlx5_vport *vport = &esw->vports[vport_num];

		vport->vport = vport_num;
		vport->dev = dev;
		INIT_WORK(&vport->vport_change_handler,
			  esw_vport_change_handler);
	}

	esw->total_vports = total_vports;
	esw->enabled_vports = 0;

	dev->priv.eswitch = esw;
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
	/* VF Vports will be enabled when SRIOV is enabled */
	return 0;
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw->l2_table.bitmap);
	kfree(esw->mc_promisc); /* also release the promisc node on failure */
	kfree(esw->vports);
	kfree(esw);
	return err;
}
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	esw_info(esw->dev, "cleanup\n");
	esw_disable_vport(esw, 0);

	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	kfree(esw->l2_table.bitmap);
	kfree(esw->mc_promisc);
	kfree(esw->vports);
	kfree(esw);
}
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
{
	struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
	u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
	struct mlx5_vport *vport;

	if (!esw) {
		pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
			vport_num);
		return;
	}

	vport = &esw->vports[vport_num];
	if (vport->enabled)
		queue_work(esw->work_queue, &vport->vport_change_handler);
}
/* Vport Administration */
#define ESW_ALLOWED(esw) \
	(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       int vport, u8 mac[ETH_ALEN])
{
	struct mlx5_vport *evport;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	evport = &esw->vports[vport];

	if (evport->spoofchk && !is_valid_ether_addr(mac)) {
		mlx5_core_warn(esw->dev,
			       "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
			       vport);
		return -EPERM;
	}

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport, err);
		return err;
	}

	mutex_lock(&esw->state_lock);
	if (evport->enabled)
		err = esw_vport_ingress_config(esw, evport);
	mutex_unlock(&esw->state_lock);

	return err;
}
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 int vport, int link_state)
{
	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	return mlx5_modify_vport_admin_state(esw->dev,
					     MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
					     vport, link_state);
}
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  int vport, struct ifla_vf_info *ivi)
{
	struct mlx5_vport *evport;
	u16 vlan;
	u8 qos;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	evport = &esw->vports[vport];

	memset(ivi, 0, sizeof(*ivi));
	ivi->vf = vport - 1;

	mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac);
	ivi->linkstate = mlx5_query_vport_admin_state(esw->dev,
						      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
						      vport);
	query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
	ivi->vlan = vlan;
	ivi->qos = qos;
	ivi->spoofchk = evport->spoofchk;

	return 0;
}
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				int vport, u16 vlan, u8 qos)
{
	struct mlx5_vport *evport;
	int err = 0;
	int set = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (vlan || qos)
		set = 1;

	evport = &esw->vports[vport];

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
	if (err)
		return err;

	mutex_lock(&esw->state_lock);
	evport->vlan = vlan;
	evport->qos = qos;
	if (evport->enabled) {
		err = esw_vport_ingress_config(esw, evport);
		if (err)
			goto out;
		err = esw_vport_egress_config(esw, evport);
	}

out:
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    int vport, bool spoofchk)
{
	struct mlx5_vport *evport;
	bool pschk;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	evport = &esw->vports[vport];

	mutex_lock(&esw->state_lock);
	pschk = evport->spoofchk;
	evport->spoofchk = spoofchk;
	if (evport->enabled)
		err = esw_vport_ingress_config(esw, evport);
	if (err)
		evport->spoofchk = pschk;
	mutex_unlock(&esw->state_lock);

	return err;
}
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 int vport, bool setting)
{
	struct mlx5_vport *evport;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	evport = &esw->vports[vport];

	mutex_lock(&esw->state_lock);
	evport->trusted = setting;
	if (evport->enabled)
		esw_vport_change_handle_locked(evport);
	mutex_unlock(&esw->state_lock);

	return 0;
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 int vport,
				 struct ifla_vf_stats *vf_stats)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	int err = 0;
	u32 *out;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_counter_in, in, other_vport, 1);

	memset(out, 0, outlen);
	err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
	if (err)
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	vf_stats->rx_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	vf_stats->tx_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	vf_stats->tx_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	vf_stats->multicast =
		MLX5_GET_CTR(out, received_eth_multicast.packets);

	vf_stats->broadcast =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

free_out:
	kvfree(out);
	return err;
}