/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/list.h>
35 #include <linux/ipv6.h>
36 #include <linux/tcp.h>
37 #include <linux/mlx5/flow_table.h>
/* Pending action recorded on a hashed MAC entry; consumed by
 * mlx5e_execute_action() during rx-mode synchronization.
 */
enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};
59 struct mlx5e_eth_addr_hash_node
{
60 struct hlist_node hlist
;
62 struct mlx5e_eth_addr_info ai
;
65 static inline int mlx5e_hash_eth_addr(u8
*addr
)
70 static void mlx5e_add_eth_addr_to_hash(struct hlist_head
*hash
, u8
*addr
)
72 struct mlx5e_eth_addr_hash_node
*hn
;
73 int ix
= mlx5e_hash_eth_addr(addr
);
76 hlist_for_each_entry(hn
, &hash
[ix
], hlist
)
77 if (ether_addr_equal_64bits(hn
->ai
.addr
, addr
)) {
83 hn
->action
= MLX5E_ACTION_NONE
;
87 hn
= kzalloc(sizeof(*hn
), GFP_ATOMIC
);
91 ether_addr_copy(hn
->ai
.addr
, addr
);
92 hn
->action
= MLX5E_ACTION_ADD
;
94 hlist_add_head(&hn
->hlist
, &hash
[ix
]);
97 static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node
*hn
)
99 hlist_del(&hn
->hlist
);
103 static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv
*priv
,
104 struct mlx5e_eth_addr_info
*ai
)
106 void *ft
= priv
->ft
.main
;
108 if (ai
->tt_vec
& (1 << MLX5E_TT_IPV6_TCP
))
109 mlx5_del_flow_table_entry(ft
, ai
->ft_ix
[MLX5E_TT_IPV6_TCP
]);
111 if (ai
->tt_vec
& (1 << MLX5E_TT_IPV4_TCP
))
112 mlx5_del_flow_table_entry(ft
, ai
->ft_ix
[MLX5E_TT_IPV4_TCP
]);
114 if (ai
->tt_vec
& (1 << MLX5E_TT_IPV6_UDP
))
115 mlx5_del_flow_table_entry(ft
, ai
->ft_ix
[MLX5E_TT_IPV6_UDP
]);
117 if (ai
->tt_vec
& (1 << MLX5E_TT_IPV4_UDP
))
118 mlx5_del_flow_table_entry(ft
, ai
->ft_ix
[MLX5E_TT_IPV4_UDP
]);
120 if (ai
->tt_vec
& (1 << MLX5E_TT_IPV6
))
121 mlx5_del_flow_table_entry(ft
, ai
->ft_ix
[MLX5E_TT_IPV6
]);
123 if (ai
->tt_vec
& (1 << MLX5E_TT_IPV4
))
124 mlx5_del_flow_table_entry(ft
, ai
->ft_ix
[MLX5E_TT_IPV4
]);
126 if (ai
->tt_vec
& (1 << MLX5E_TT_ANY
))
127 mlx5_del_flow_table_entry(ft
, ai
->ft_ix
[MLX5E_TT_ANY
]);
130 static int mlx5e_get_eth_addr_type(u8
*addr
)
132 if (is_unicast_ether_addr(addr
))
135 if ((addr
[0] == 0x01) &&
139 return MLX5E_MC_IPV4
;
141 if ((addr
[0] == 0x33) &&
143 return MLX5E_MC_IPV6
;
145 return MLX5E_MC_OTHER
;
148 static u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info
*ai
, int type
)
154 case MLX5E_FULLMATCH
:
155 eth_addr_type
= mlx5e_get_eth_addr_type(ai
->addr
);
156 switch (eth_addr_type
) {
159 (1 << MLX5E_TT_IPV4_TCP
) |
160 (1 << MLX5E_TT_IPV6_TCP
) |
161 (1 << MLX5E_TT_IPV4_UDP
) |
162 (1 << MLX5E_TT_IPV6_UDP
) |
163 (1 << MLX5E_TT_IPV4
) |
164 (1 << MLX5E_TT_IPV6
) |
165 (1 << MLX5E_TT_ANY
) |
171 (1 << MLX5E_TT_IPV4_UDP
) |
172 (1 << MLX5E_TT_IPV4
) |
178 (1 << MLX5E_TT_IPV6_UDP
) |
179 (1 << MLX5E_TT_IPV6
) |
185 (1 << MLX5E_TT_ANY
) |
194 (1 << MLX5E_TT_IPV4_UDP
) |
195 (1 << MLX5E_TT_IPV6_UDP
) |
196 (1 << MLX5E_TT_IPV4
) |
197 (1 << MLX5E_TT_IPV6
) |
198 (1 << MLX5E_TT_ANY
) |
202 default: /* MLX5E_PROMISC */
204 (1 << MLX5E_TT_IPV4_TCP
) |
205 (1 << MLX5E_TT_IPV6_TCP
) |
206 (1 << MLX5E_TT_IPV4_UDP
) |
207 (1 << MLX5E_TT_IPV6_UDP
) |
208 (1 << MLX5E_TT_IPV4
) |
209 (1 << MLX5E_TT_IPV6
) |
210 (1 << MLX5E_TT_ANY
) |
218 static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv
*priv
,
219 struct mlx5e_eth_addr_info
*ai
, int type
,
220 void *flow_context
, void *match_criteria
)
222 u8 match_criteria_enable
= 0;
226 u8
*match_criteria_dmac
;
227 void *ft
= priv
->ft
.main
;
228 u32
*tirn
= priv
->tirn
;
232 match_value
= MLX5_ADDR_OF(flow_context
, flow_context
, match_value
);
233 dmac
= MLX5_ADDR_OF(fte_match_param
, match_value
,
234 outer_headers
.dmac_47_16
);
235 match_criteria_dmac
= MLX5_ADDR_OF(fte_match_param
, match_criteria
,
236 outer_headers
.dmac_47_16
);
237 dest
= MLX5_ADDR_OF(flow_context
, flow_context
, destination
);
239 MLX5_SET(flow_context
, flow_context
, action
,
240 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
);
241 MLX5_SET(flow_context
, flow_context
, destination_list_size
, 1);
242 MLX5_SET(dest_format_struct
, dest
, destination_type
,
243 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR
);
246 case MLX5E_FULLMATCH
:
247 match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
248 memset(match_criteria_dmac
, 0xff, ETH_ALEN
);
249 ether_addr_copy(dmac
, ai
->addr
);
253 match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
254 match_criteria_dmac
[0] = 0x01;
262 tt_vec
= mlx5e_get_tt_vec(ai
, type
);
264 if (tt_vec
& (1 << MLX5E_TT_ANY
)) {
265 MLX5_SET(dest_format_struct
, dest
, destination_id
,
267 err
= mlx5_add_flow_table_entry(ft
, match_criteria_enable
,
268 match_criteria
, flow_context
,
269 &ai
->ft_ix
[MLX5E_TT_ANY
]);
271 mlx5e_del_eth_addr_from_flow_table(priv
, ai
);
274 ai
->tt_vec
|= (1 << MLX5E_TT_ANY
);
277 match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
278 MLX5_SET_TO_ONES(fte_match_param
, match_criteria
,
279 outer_headers
.ethertype
);
281 if (tt_vec
& (1 << MLX5E_TT_IPV4
)) {
282 MLX5_SET(fte_match_param
, match_value
, outer_headers
.ethertype
,
284 MLX5_SET(dest_format_struct
, dest
, destination_id
,
285 tirn
[MLX5E_TT_IPV4
]);
286 err
= mlx5_add_flow_table_entry(ft
, match_criteria_enable
,
287 match_criteria
, flow_context
,
288 &ai
->ft_ix
[MLX5E_TT_IPV4
]);
290 mlx5e_del_eth_addr_from_flow_table(priv
, ai
);
293 ai
->tt_vec
|= (1 << MLX5E_TT_IPV4
);
296 if (tt_vec
& (1 << MLX5E_TT_IPV6
)) {
297 MLX5_SET(fte_match_param
, match_value
, outer_headers
.ethertype
,
299 MLX5_SET(dest_format_struct
, dest
, destination_id
,
300 tirn
[MLX5E_TT_IPV6
]);
301 err
= mlx5_add_flow_table_entry(ft
, match_criteria_enable
,
302 match_criteria
, flow_context
,
303 &ai
->ft_ix
[MLX5E_TT_IPV6
]);
305 mlx5e_del_eth_addr_from_flow_table(priv
, ai
);
308 ai
->tt_vec
|= (1 << MLX5E_TT_IPV6
);
311 MLX5_SET_TO_ONES(fte_match_param
, match_criteria
,
312 outer_headers
.ip_protocol
);
313 MLX5_SET(fte_match_param
, match_value
, outer_headers
.ip_protocol
,
316 if (tt_vec
& (1 << MLX5E_TT_IPV4_UDP
)) {
317 MLX5_SET(fte_match_param
, match_value
, outer_headers
.ethertype
,
319 MLX5_SET(dest_format_struct
, dest
, destination_id
,
320 tirn
[MLX5E_TT_IPV4_UDP
]);
321 err
= mlx5_add_flow_table_entry(ft
, match_criteria_enable
,
322 match_criteria
, flow_context
,
323 &ai
->ft_ix
[MLX5E_TT_IPV4_UDP
]);
325 mlx5e_del_eth_addr_from_flow_table(priv
, ai
);
328 ai
->tt_vec
|= (1 << MLX5E_TT_IPV4_UDP
);
331 if (tt_vec
& (1 << MLX5E_TT_IPV6_UDP
)) {
332 MLX5_SET(fte_match_param
, match_value
, outer_headers
.ethertype
,
334 MLX5_SET(dest_format_struct
, dest
, destination_id
,
335 tirn
[MLX5E_TT_IPV6_UDP
]);
336 err
= mlx5_add_flow_table_entry(ft
, match_criteria_enable
,
337 match_criteria
, flow_context
,
338 &ai
->ft_ix
[MLX5E_TT_IPV6_UDP
]);
340 mlx5e_del_eth_addr_from_flow_table(priv
, ai
);
343 ai
->tt_vec
|= (1 << MLX5E_TT_IPV6_UDP
);
346 MLX5_SET(fte_match_param
, match_value
, outer_headers
.ip_protocol
,
349 if (tt_vec
& (1 << MLX5E_TT_IPV4_TCP
)) {
350 MLX5_SET(fte_match_param
, match_value
, outer_headers
.ethertype
,
352 MLX5_SET(dest_format_struct
, dest
, destination_id
,
353 tirn
[MLX5E_TT_IPV4_TCP
]);
354 err
= mlx5_add_flow_table_entry(ft
, match_criteria_enable
,
355 match_criteria
, flow_context
,
356 &ai
->ft_ix
[MLX5E_TT_IPV4_TCP
]);
358 mlx5e_del_eth_addr_from_flow_table(priv
, ai
);
361 ai
->tt_vec
|= (1 << MLX5E_TT_IPV4_TCP
);
364 if (tt_vec
& (1 << MLX5E_TT_IPV6_TCP
)) {
365 MLX5_SET(fte_match_param
, match_value
, outer_headers
.ethertype
,
367 MLX5_SET(dest_format_struct
, dest
, destination_id
,
368 tirn
[MLX5E_TT_IPV6_TCP
]);
369 err
= mlx5_add_flow_table_entry(ft
, match_criteria_enable
,
370 match_criteria
, flow_context
,
371 &ai
->ft_ix
[MLX5E_TT_IPV6_TCP
]);
373 mlx5e_del_eth_addr_from_flow_table(priv
, ai
);
376 ai
->tt_vec
|= (1 << MLX5E_TT_IPV6_TCP
);
382 static int mlx5e_add_eth_addr_rule(struct mlx5e_priv
*priv
,
383 struct mlx5e_eth_addr_info
*ai
, int type
)
389 flow_context
= mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context
) +
390 MLX5_ST_SZ_BYTES(dest_format_struct
));
391 match_criteria
= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param
));
392 if (!flow_context
|| !match_criteria
) {
393 netdev_err(priv
->netdev
, "%s: alloc failed\n", __func__
);
395 goto add_eth_addr_rule_out
;
398 err
= __mlx5e_add_eth_addr_rule(priv
, ai
, type
, flow_context
,
401 netdev_err(priv
->netdev
, "%s: failed\n", __func__
);
403 add_eth_addr_rule_out
:
404 kvfree(match_criteria
);
405 kvfree(flow_context
);
/* Kinds of VLAN steering rules installed in the VLAN flow table. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};
415 static int mlx5e_add_vlan_rule(struct mlx5e_priv
*priv
,
416 enum mlx5e_vlan_rule_type rule_type
, u16 vid
)
418 u8 match_criteria_enable
= 0;
426 flow_context
= mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context
) +
427 MLX5_ST_SZ_BYTES(dest_format_struct
));
428 match_criteria
= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param
));
429 if (!flow_context
|| !match_criteria
) {
430 netdev_err(priv
->netdev
, "%s: alloc failed\n", __func__
);
432 goto add_vlan_rule_out
;
434 match_value
= MLX5_ADDR_OF(flow_context
, flow_context
, match_value
);
435 dest
= MLX5_ADDR_OF(flow_context
, flow_context
, destination
);
437 MLX5_SET(flow_context
, flow_context
, action
,
438 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
);
439 MLX5_SET(flow_context
, flow_context
, destination_list_size
, 1);
440 MLX5_SET(dest_format_struct
, dest
, destination_type
,
441 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE
);
442 MLX5_SET(dest_format_struct
, dest
, destination_id
,
443 mlx5_get_flow_table_id(priv
->ft
.main
));
445 match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
446 MLX5_SET_TO_ONES(fte_match_param
, match_criteria
,
447 outer_headers
.vlan_tag
);
450 case MLX5E_VLAN_RULE_TYPE_UNTAGGED
:
451 ft_ix
= &priv
->vlan
.untagged_rule_ft_ix
;
453 case MLX5E_VLAN_RULE_TYPE_ANY_VID
:
454 ft_ix
= &priv
->vlan
.any_vlan_rule_ft_ix
;
455 MLX5_SET(fte_match_param
, match_value
, outer_headers
.vlan_tag
,
458 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
459 ft_ix
= &priv
->vlan
.active_vlans_ft_ix
[vid
];
460 MLX5_SET(fte_match_param
, match_value
, outer_headers
.vlan_tag
,
462 MLX5_SET_TO_ONES(fte_match_param
, match_criteria
,
463 outer_headers
.first_vid
);
464 MLX5_SET(fte_match_param
, match_value
, outer_headers
.first_vid
,
469 err
= mlx5_add_flow_table_entry(priv
->ft
.vlan
, match_criteria_enable
,
470 match_criteria
, flow_context
, ft_ix
);
472 netdev_err(priv
->netdev
, "%s: failed\n", __func__
);
475 kvfree(match_criteria
);
476 kvfree(flow_context
);
480 static void mlx5e_del_vlan_rule(struct mlx5e_priv
*priv
,
481 enum mlx5e_vlan_rule_type rule_type
, u16 vid
)
484 case MLX5E_VLAN_RULE_TYPE_UNTAGGED
:
485 mlx5_del_flow_table_entry(priv
->ft
.vlan
,
486 priv
->vlan
.untagged_rule_ft_ix
);
488 case MLX5E_VLAN_RULE_TYPE_ANY_VID
:
489 mlx5_del_flow_table_entry(priv
->ft
.vlan
,
490 priv
->vlan
.any_vlan_rule_ft_ix
);
492 case MLX5E_VLAN_RULE_TYPE_MATCH_VID
:
493 mlx5_del_flow_table_entry(priv
->ft
.vlan
,
494 priv
->vlan
.active_vlans_ft_ix
[vid
]);
499 void mlx5e_enable_vlan_filter(struct mlx5e_priv
*priv
)
501 WARN_ON(!mutex_is_locked(&priv
->state_lock
));
503 if (priv
->vlan
.filter_disabled
) {
504 priv
->vlan
.filter_disabled
= false;
505 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
506 mlx5e_del_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_ANY_VID
,
511 void mlx5e_disable_vlan_filter(struct mlx5e_priv
*priv
)
513 WARN_ON(!mutex_is_locked(&priv
->state_lock
));
515 if (!priv
->vlan
.filter_disabled
) {
516 priv
->vlan
.filter_disabled
= true;
517 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
518 mlx5e_add_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_ANY_VID
,
523 int mlx5e_vlan_rx_add_vid(struct net_device
*dev
, __always_unused __be16 proto
,
526 struct mlx5e_priv
*priv
= netdev_priv(dev
);
529 mutex_lock(&priv
->state_lock
);
531 set_bit(vid
, priv
->vlan
.active_vlans
);
532 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
533 err
= mlx5e_add_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_MATCH_VID
,
536 mutex_unlock(&priv
->state_lock
);
541 int mlx5e_vlan_rx_kill_vid(struct net_device
*dev
, __always_unused __be16 proto
,
544 struct mlx5e_priv
*priv
= netdev_priv(dev
);
546 mutex_lock(&priv
->state_lock
);
548 clear_bit(vid
, priv
->vlan
.active_vlans
);
549 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
550 mlx5e_del_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_MATCH_VID
, vid
);
552 mutex_unlock(&priv
->state_lock
);
557 int mlx5e_add_all_vlan_rules(struct mlx5e_priv
*priv
)
562 for_each_set_bit(vid
, priv
->vlan
.active_vlans
, VLAN_N_VID
) {
563 err
= mlx5e_add_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_MATCH_VID
,
569 err
= mlx5e_add_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_UNTAGGED
, 0);
573 if (priv
->vlan
.filter_disabled
) {
574 err
= mlx5e_add_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_ANY_VID
,
583 void mlx5e_del_all_vlan_rules(struct mlx5e_priv
*priv
)
587 if (priv
->vlan
.filter_disabled
)
588 mlx5e_del_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_ANY_VID
, 0);
590 mlx5e_del_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_UNTAGGED
, 0);
592 for_each_set_bit(vid
, priv
->vlan
.active_vlans
, VLAN_N_VID
)
593 mlx5e_del_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_MATCH_VID
, vid
);
/* Iterate (deletion-safe) over every node in every bucket of a MAC
 * address hash table.  @tmp is the hlist_node scratch cursor required by
 * hlist_for_each_entry_safe(); @i is the bucket index.
 */
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
600 static void mlx5e_execute_action(struct mlx5e_priv
*priv
,
601 struct mlx5e_eth_addr_hash_node
*hn
)
603 switch (hn
->action
) {
604 case MLX5E_ACTION_ADD
:
605 mlx5e_add_eth_addr_rule(priv
, &hn
->ai
, MLX5E_FULLMATCH
);
606 hn
->action
= MLX5E_ACTION_NONE
;
609 case MLX5E_ACTION_DEL
:
610 mlx5e_del_eth_addr_from_flow_table(priv
, &hn
->ai
);
611 mlx5e_del_eth_addr_from_hash(hn
);
616 static void mlx5e_sync_netdev_addr(struct mlx5e_priv
*priv
)
618 struct net_device
*netdev
= priv
->netdev
;
619 struct netdev_hw_addr
*ha
;
621 netif_addr_lock_bh(netdev
);
623 mlx5e_add_eth_addr_to_hash(priv
->eth_addr
.netdev_uc
,
624 priv
->netdev
->dev_addr
);
626 netdev_for_each_uc_addr(ha
, netdev
)
627 mlx5e_add_eth_addr_to_hash(priv
->eth_addr
.netdev_uc
, ha
->addr
);
629 netdev_for_each_mc_addr(ha
, netdev
)
630 mlx5e_add_eth_addr_to_hash(priv
->eth_addr
.netdev_mc
, ha
->addr
);
632 netif_addr_unlock_bh(netdev
);
635 static void mlx5e_apply_netdev_addr(struct mlx5e_priv
*priv
)
637 struct mlx5e_eth_addr_hash_node
*hn
;
638 struct hlist_node
*tmp
;
641 mlx5e_for_each_hash_node(hn
, tmp
, priv
->eth_addr
.netdev_uc
, i
)
642 mlx5e_execute_action(priv
, hn
);
644 mlx5e_for_each_hash_node(hn
, tmp
, priv
->eth_addr
.netdev_mc
, i
)
645 mlx5e_execute_action(priv
, hn
);
648 static void mlx5e_handle_netdev_addr(struct mlx5e_priv
*priv
)
650 struct mlx5e_eth_addr_hash_node
*hn
;
651 struct hlist_node
*tmp
;
654 mlx5e_for_each_hash_node(hn
, tmp
, priv
->eth_addr
.netdev_uc
, i
)
655 hn
->action
= MLX5E_ACTION_DEL
;
656 mlx5e_for_each_hash_node(hn
, tmp
, priv
->eth_addr
.netdev_mc
, i
)
657 hn
->action
= MLX5E_ACTION_DEL
;
659 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
660 mlx5e_sync_netdev_addr(priv
);
662 mlx5e_apply_netdev_addr(priv
);
665 void mlx5e_set_rx_mode_core(struct mlx5e_priv
*priv
)
667 struct mlx5e_eth_addr_db
*ea
= &priv
->eth_addr
;
668 struct net_device
*ndev
= priv
->netdev
;
670 bool rx_mode_enable
= test_bit(MLX5E_STATE_OPENED
, &priv
->state
);
671 bool promisc_enabled
= rx_mode_enable
&& (ndev
->flags
& IFF_PROMISC
);
672 bool allmulti_enabled
= rx_mode_enable
&& (ndev
->flags
& IFF_ALLMULTI
);
673 bool broadcast_enabled
= rx_mode_enable
;
675 bool enable_promisc
= !ea
->promisc_enabled
&& promisc_enabled
;
676 bool disable_promisc
= ea
->promisc_enabled
&& !promisc_enabled
;
677 bool enable_allmulti
= !ea
->allmulti_enabled
&& allmulti_enabled
;
678 bool disable_allmulti
= ea
->allmulti_enabled
&& !allmulti_enabled
;
679 bool enable_broadcast
= !ea
->broadcast_enabled
&& broadcast_enabled
;
680 bool disable_broadcast
= ea
->broadcast_enabled
&& !broadcast_enabled
;
683 mlx5e_add_eth_addr_rule(priv
, &ea
->promisc
, MLX5E_PROMISC
);
685 mlx5e_add_eth_addr_rule(priv
, &ea
->allmulti
, MLX5E_ALLMULTI
);
686 if (enable_broadcast
)
687 mlx5e_add_eth_addr_rule(priv
, &ea
->broadcast
, MLX5E_FULLMATCH
);
689 mlx5e_handle_netdev_addr(priv
);
691 if (disable_broadcast
)
692 mlx5e_del_eth_addr_from_flow_table(priv
, &ea
->broadcast
);
693 if (disable_allmulti
)
694 mlx5e_del_eth_addr_from_flow_table(priv
, &ea
->allmulti
);
696 mlx5e_del_eth_addr_from_flow_table(priv
, &ea
->promisc
);
698 ea
->promisc_enabled
= promisc_enabled
;
699 ea
->allmulti_enabled
= allmulti_enabled
;
700 ea
->broadcast_enabled
= broadcast_enabled
;
703 void mlx5e_set_rx_mode_work(struct work_struct
*work
)
705 struct mlx5e_priv
*priv
= container_of(work
, struct mlx5e_priv
,
708 mutex_lock(&priv
->state_lock
);
709 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
710 mlx5e_set_rx_mode_core(priv
);
711 mutex_unlock(&priv
->state_lock
);
714 void mlx5e_init_eth_addr(struct mlx5e_priv
*priv
)
716 ether_addr_copy(priv
->eth_addr
.broadcast
.addr
, priv
->netdev
->broadcast
);
719 static int mlx5e_create_main_flow_table(struct mlx5e_priv
*priv
)
721 struct mlx5_flow_table_group
*g
;
724 g
= kcalloc(9, sizeof(*g
), GFP_KERNEL
);
727 g
[0].match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
728 MLX5_SET_TO_ONES(fte_match_param
, g
[0].match_criteria
,
729 outer_headers
.ethertype
);
730 MLX5_SET_TO_ONES(fte_match_param
, g
[0].match_criteria
,
731 outer_headers
.ip_protocol
);
734 g
[1].match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
735 MLX5_SET_TO_ONES(fte_match_param
, g
[1].match_criteria
,
736 outer_headers
.ethertype
);
741 g
[3].match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
742 dmac
= MLX5_ADDR_OF(fte_match_param
, g
[3].match_criteria
,
743 outer_headers
.dmac_47_16
);
744 memset(dmac
, 0xff, ETH_ALEN
);
745 MLX5_SET_TO_ONES(fte_match_param
, g
[3].match_criteria
,
746 outer_headers
.ethertype
);
747 MLX5_SET_TO_ONES(fte_match_param
, g
[3].match_criteria
,
748 outer_headers
.ip_protocol
);
751 g
[4].match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
752 dmac
= MLX5_ADDR_OF(fte_match_param
, g
[4].match_criteria
,
753 outer_headers
.dmac_47_16
);
754 memset(dmac
, 0xff, ETH_ALEN
);
755 MLX5_SET_TO_ONES(fte_match_param
, g
[4].match_criteria
,
756 outer_headers
.ethertype
);
759 g
[5].match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
760 dmac
= MLX5_ADDR_OF(fte_match_param
, g
[5].match_criteria
,
761 outer_headers
.dmac_47_16
);
762 memset(dmac
, 0xff, ETH_ALEN
);
765 g
[6].match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
766 dmac
= MLX5_ADDR_OF(fte_match_param
, g
[6].match_criteria
,
767 outer_headers
.dmac_47_16
);
769 MLX5_SET_TO_ONES(fte_match_param
, g
[6].match_criteria
,
770 outer_headers
.ethertype
);
771 MLX5_SET_TO_ONES(fte_match_param
, g
[6].match_criteria
,
772 outer_headers
.ip_protocol
);
775 g
[7].match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
776 dmac
= MLX5_ADDR_OF(fte_match_param
, g
[7].match_criteria
,
777 outer_headers
.dmac_47_16
);
779 MLX5_SET_TO_ONES(fte_match_param
, g
[7].match_criteria
,
780 outer_headers
.ethertype
);
783 g
[8].match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
784 dmac
= MLX5_ADDR_OF(fte_match_param
, g
[8].match_criteria
,
785 outer_headers
.dmac_47_16
);
787 priv
->ft
.main
= mlx5_create_flow_table(priv
->mdev
, 1,
788 MLX5_FLOW_TABLE_TYPE_NIC_RCV
,
792 return priv
->ft
.main
? 0 : -ENOMEM
;
795 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv
*priv
)
797 mlx5_destroy_flow_table(priv
->ft
.main
);
800 static int mlx5e_create_vlan_flow_table(struct mlx5e_priv
*priv
)
802 struct mlx5_flow_table_group
*g
;
804 g
= kcalloc(2, sizeof(*g
), GFP_KERNEL
);
809 g
[0].match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
810 MLX5_SET_TO_ONES(fte_match_param
, g
[0].match_criteria
,
811 outer_headers
.vlan_tag
);
812 MLX5_SET_TO_ONES(fte_match_param
, g
[0].match_criteria
,
813 outer_headers
.first_vid
);
815 /* untagged + any vlan id */
817 g
[1].match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
818 MLX5_SET_TO_ONES(fte_match_param
, g
[1].match_criteria
,
819 outer_headers
.vlan_tag
);
821 priv
->ft
.vlan
= mlx5_create_flow_table(priv
->mdev
, 0,
822 MLX5_FLOW_TABLE_TYPE_NIC_RCV
,
826 return priv
->ft
.vlan
? 0 : -ENOMEM
;
829 static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv
*priv
)
831 mlx5_destroy_flow_table(priv
->ft
.vlan
);
/* Create both flow tables (main first, since VLAN rules forward into it).
 * On VLAN-table failure the main table is rolled back.
 */
int mlx5e_open_flow_table(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		return err;

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		goto err_destroy_main_flow_table;

	return 0;

err_destroy_main_flow_table:
	mlx5e_destroy_main_flow_table(priv);

	return err;
}
/* Destroy both flow tables in reverse creation order. */
void mlx5e_close_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
}