/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);

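/*
 * Rule-match types for L2 flow rules, and the per-address actions used
 * to sync the netdev address lists into the L2 flow table: a hash node
 * is tentatively marked DEL, flipped back to NONE/ADD while the netdev
 * lists are re-scanned, and the remaining action is then executed.
 */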
enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
	MLX5E_PROMISC   = 2,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};

struct mlx5e_l2_hash_node {
	struct hlist_node    hlist;
	u8                   action;
	struct mlx5e_l2_rule ai;
};

static inline int mlx5e_hash_l2(u8 *addr)
{
	/* hash on the last byte of the MAC address */
	return addr[5];
}

static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}

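/*
 * Mirror the active-VLAN bitmap into the NIC vport context so the
 * device (e.g. the eswitch) knows which VLANs this vport accepts.
 * The firmware list is capped by the log_max_vlan_list capability,
 * so overflow VLANs are dropped with a warning.
 */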
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

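/*
 * Every VLAN rule matches on the outer vlan_tag presence bit;
 * MATCH_VID rules additionally match on first_vid.  All of them
 * forward to the L2 flow table.
 */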
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->fs.vlan.untagged_rule;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		rule_p = &priv->fs.vlan.any_vlan_rule;
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, spec,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_DEFAULT_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		return -ENOMEM;
	}

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

	kvfree(spec);

	return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		if (priv->fs.vlan.any_vlan_rule) {
			mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
			priv->fs.vlan.any_vlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5e_vport_context_update_vlans(priv);
		if (priv->fs.vlan.active_vlans_rule[vid]) {
			mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
			priv->fs.vlan.active_vlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}

void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

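/* ndo_vlan_rx_add_vid / ndo_vlan_rx_kill_vid callbacks */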
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->fs.vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->fs.vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}

static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	}

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	}

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

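/*
 * Walk every node in every bucket of an L2 address hash table.  The
 * _safe iterator is needed because callers such as
 * mlx5e_execute_l2_action() may free the node being visited.
 */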
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}
}

static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

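/*
 * Flatten an address hash table into the array layout taken by
 * mlx5_modify_nic_vport_mac_list(), pushing our own MAC (UC list) or
 * the broadcast address (MC list) first.
 */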
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}

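/*
 * Mark-and-sweep sync of the netdev address lists: mark every hashed
 * address for deletion, re-scan the netdev (which flips surviving
 * entries back to NONE/ADD), then apply the resulting actions.
 */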
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}

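/*
 * rx-mode workqueue handler.  Each desired promisc/allmulti/broadcast
 * state is diffed against the state cached in the L2 table, so only
 * the changes are translated into flow rule additions and deletions.
 */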
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
			mlx5_del_flow_rule(ttc->rules[i]);
			ttc->rules[i] = NULL;
		}
	}
}

static struct {
	u16 etype;
	u8 proto;
} ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5E_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5E_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};

static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
						      struct mlx5_flow_table *ft,
						      struct mlx5_flow_destination *dest,
						      u16 etype,
						      u8 proto)
{
	struct mlx5_flow_rule *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
	}
	if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rule(ft, spec,
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				  MLX5_FS_DEFAULT_FLOW_TAG,
				  dest);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	struct mlx5e_ttc_table *ttc;
	struct mlx5_flow_rule **rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ttc = &priv->fs.ttc;
	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = priv->direct_tir[0].tirn;
		else
			dest.tir_num = priv->indir_tir[tt].tirn;
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_TTC_NUM_GROUPS	3
#define MLX5E_TTC_GROUP1_SIZE	BIT(3)
#define MLX5E_TTC_GROUP2_SIZE	BIT(1)
#define MLX5E_TTC_GROUP3_SIZE	BIT(0)
#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
				 MLX5E_TTC_GROUP2_SIZE +\
				 MLX5E_TTC_GROUP3_SIZE)

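/*
 * TTC group sizing: group 1 holds the eight etype+proto rules (TCP,
 * UDP, AH and ESP over IPv4/IPv6), group 2 the two etype-only rules
 * (IPv4/IPv6), and group 3 the single catch-all rule.
 */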
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
				       MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rule(ai->rule);
		ai->rule = NULL;
	}
}

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		return -ENOMEM;
	}

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	}

	ai->rule = mlx5_add_flow_rule(ft, spec,
				      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				      MLX5_FS_DEFAULT_FLOW_TAG, &dest);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}

#define MLX5E_NUM_L2_GROUPS	3
#define MLX5E_L2_GROUP1_SIZE	BIT(0)
#define MLX5E_L2_GROUP2_SIZE	BIT(15)
#define MLX5E_L2_GROUP3_SIZE	BIT(0)
#define MLX5E_L2_TABLE_SIZE	(MLX5E_L2_GROUP1_SIZE +\
				 MLX5E_L2_GROUP2_SIZE +\
				 MLX5E_L2_GROUP3_SIZE)

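/*
 * L2 group sizing: one wildcard (promiscuous) entry, 2^15 full
 * DMAC-match entries for unicast/broadcast addresses, and one
 * multicast-prefix (allmulti) entry.
 */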
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for promiscuous */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);

	return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
				       MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

#define MLX5E_NUM_VLAN_GROUPS	2
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE)

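/*
 * VLAN group sizing: BIT(12) = 4096 entries for per-VID rules (one per
 * possible VID), plus two entries for the untagged and any-VID rules.
 */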
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	u32 *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
				       MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}

int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	int err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EINVAL;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_ttc_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}