/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/list.h>
35 #include <linux/ipv6.h>
36 #include <linux/tcp.h>
37 #include <linux/mlx5/fs.h>
40 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv
*priv
,
41 struct mlx5e_l2_rule
*ai
, int type
);
42 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv
*priv
,
43 struct mlx5e_l2_rule
*ai
);
59 MLX5E_ACTION_NONE
= 0,
64 struct mlx5e_l2_hash_node
{
65 struct hlist_node hlist
;
67 struct mlx5e_l2_rule ai
;
70 static inline int mlx5e_hash_l2(u8
*addr
)
75 static void mlx5e_add_l2_to_hash(struct hlist_head
*hash
, u8
*addr
)
77 struct mlx5e_l2_hash_node
*hn
;
78 int ix
= mlx5e_hash_l2(addr
);
81 hlist_for_each_entry(hn
, &hash
[ix
], hlist
)
82 if (ether_addr_equal_64bits(hn
->ai
.addr
, addr
)) {
88 hn
->action
= MLX5E_ACTION_NONE
;
92 hn
= kzalloc(sizeof(*hn
), GFP_ATOMIC
);
96 ether_addr_copy(hn
->ai
.addr
, addr
);
97 hn
->action
= MLX5E_ACTION_ADD
;
99 hlist_add_head(&hn
->hlist
, &hash
[ix
]);
102 static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node
*hn
)
104 hlist_del(&hn
->hlist
);
108 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv
*priv
)
110 struct net_device
*ndev
= priv
->netdev
;
119 for_each_set_bit(vlan
, priv
->fs
.vlan
.active_vlans
, VLAN_N_VID
)
122 max_list_size
= 1 << MLX5_CAP_GEN(priv
->mdev
, log_max_vlan_list
);
124 if (list_size
> max_list_size
) {
126 "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
127 list_size
, max_list_size
);
128 list_size
= max_list_size
;
131 vlans
= kcalloc(list_size
, sizeof(*vlans
), GFP_KERNEL
);
136 for_each_set_bit(vlan
, priv
->fs
.vlan
.active_vlans
, VLAN_N_VID
) {
142 err
= mlx5_modify_nic_vport_vlans(priv
->mdev
, vlans
, list_size
);
144 netdev_err(ndev
, "Failed to modify vport vlans list err(%d)\n",
/* Kinds of VLAN steering rules installed in the VLAN flow table. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};
157 static int __mlx5e_add_vlan_rule(struct mlx5e_priv
*priv
,
158 enum mlx5e_vlan_rule_type rule_type
,
159 u16 vid
, struct mlx5_flow_spec
*spec
)
161 struct mlx5_flow_table
*ft
= priv
->fs
.vlan
.ft
.t
;
162 struct mlx5_flow_destination dest
;
163 struct mlx5_flow_rule
**rule_p
;
166 dest
.type
= MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE
;
167 dest
.ft
= priv
->fs
.l2
.ft
.t
;
169 spec
->match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
170 MLX5_SET_TO_ONES(fte_match_param
, spec
->match_criteria
, outer_headers
.vlan_tag
);
173 case MLX5E_VLAN_RULE_TYPE_UNTAGGED
:
174 rule_p
= &priv
->fs
.vlan
.untagged_rule
;
176 case MLX5E_VLAN_RULE_TYPE_ANY_VID
:
177 rule_p
= &priv
->fs
.vlan
.any_vlan_rule
;
178 MLX5_SET(fte_match_param
, spec
->match_value
, outer_headers
.vlan_tag
, 1);
180 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
181 rule_p
= &priv
->fs
.vlan
.active_vlans_rule
[vid
];
182 MLX5_SET(fte_match_param
, spec
->match_value
, outer_headers
.vlan_tag
, 1);
183 MLX5_SET_TO_ONES(fte_match_param
, spec
->match_criteria
,
184 outer_headers
.first_vid
);
185 MLX5_SET(fte_match_param
, spec
->match_value
, outer_headers
.first_vid
,
190 *rule_p
= mlx5_add_flow_rule(ft
, spec
,
191 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
,
192 MLX5_FS_DEFAULT_FLOW_TAG
,
195 if (IS_ERR(*rule_p
)) {
196 err
= PTR_ERR(*rule_p
);
198 netdev_err(priv
->netdev
, "%s: add rule failed\n", __func__
);
204 static int mlx5e_add_vlan_rule(struct mlx5e_priv
*priv
,
205 enum mlx5e_vlan_rule_type rule_type
, u16 vid
)
207 struct mlx5_flow_spec
*spec
;
210 spec
= mlx5_vzalloc(sizeof(*spec
));
212 netdev_err(priv
->netdev
, "%s: alloc failed\n", __func__
);
216 if (rule_type
== MLX5E_VLAN_RULE_TYPE_MATCH_VID
)
217 mlx5e_vport_context_update_vlans(priv
);
219 err
= __mlx5e_add_vlan_rule(priv
, rule_type
, vid
, spec
);
226 static void mlx5e_del_vlan_rule(struct mlx5e_priv
*priv
,
227 enum mlx5e_vlan_rule_type rule_type
, u16 vid
)
230 case MLX5E_VLAN_RULE_TYPE_UNTAGGED
:
231 if (priv
->fs
.vlan
.untagged_rule
) {
232 mlx5_del_flow_rule(priv
->fs
.vlan
.untagged_rule
);
233 priv
->fs
.vlan
.untagged_rule
= NULL
;
236 case MLX5E_VLAN_RULE_TYPE_ANY_VID
:
237 if (priv
->fs
.vlan
.any_vlan_rule
) {
238 mlx5_del_flow_rule(priv
->fs
.vlan
.any_vlan_rule
);
239 priv
->fs
.vlan
.any_vlan_rule
= NULL
;
242 case MLX5E_VLAN_RULE_TYPE_MATCH_VID
:
243 mlx5e_vport_context_update_vlans(priv
);
244 if (priv
->fs
.vlan
.active_vlans_rule
[vid
]) {
245 mlx5_del_flow_rule(priv
->fs
.vlan
.active_vlans_rule
[vid
]);
246 priv
->fs
.vlan
.active_vlans_rule
[vid
] = NULL
;
248 mlx5e_vport_context_update_vlans(priv
);
253 void mlx5e_enable_vlan_filter(struct mlx5e_priv
*priv
)
255 if (!priv
->fs
.vlan
.filter_disabled
)
258 priv
->fs
.vlan
.filter_disabled
= false;
259 if (priv
->netdev
->flags
& IFF_PROMISC
)
261 mlx5e_del_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_ANY_VID
, 0);
264 void mlx5e_disable_vlan_filter(struct mlx5e_priv
*priv
)
266 if (priv
->fs
.vlan
.filter_disabled
)
269 priv
->fs
.vlan
.filter_disabled
= true;
270 if (priv
->netdev
->flags
& IFF_PROMISC
)
272 mlx5e_add_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_ANY_VID
, 0);
275 int mlx5e_vlan_rx_add_vid(struct net_device
*dev
, __always_unused __be16 proto
,
278 struct mlx5e_priv
*priv
= netdev_priv(dev
);
280 set_bit(vid
, priv
->fs
.vlan
.active_vlans
);
282 return mlx5e_add_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_MATCH_VID
, vid
);
285 int mlx5e_vlan_rx_kill_vid(struct net_device
*dev
, __always_unused __be16 proto
,
288 struct mlx5e_priv
*priv
= netdev_priv(dev
);
290 clear_bit(vid
, priv
->fs
.vlan
.active_vlans
);
292 mlx5e_del_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_MATCH_VID
, vid
);
/* Walk every node of every L2 hash bucket; removal-safe iteration. */
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
301 static void mlx5e_execute_l2_action(struct mlx5e_priv
*priv
,
302 struct mlx5e_l2_hash_node
*hn
)
304 switch (hn
->action
) {
305 case MLX5E_ACTION_ADD
:
306 mlx5e_add_l2_flow_rule(priv
, &hn
->ai
, MLX5E_FULLMATCH
);
307 hn
->action
= MLX5E_ACTION_NONE
;
310 case MLX5E_ACTION_DEL
:
311 mlx5e_del_l2_flow_rule(priv
, &hn
->ai
);
312 mlx5e_del_l2_from_hash(hn
);
317 static void mlx5e_sync_netdev_addr(struct mlx5e_priv
*priv
)
319 struct net_device
*netdev
= priv
->netdev
;
320 struct netdev_hw_addr
*ha
;
322 netif_addr_lock_bh(netdev
);
324 mlx5e_add_l2_to_hash(priv
->fs
.l2
.netdev_uc
,
325 priv
->netdev
->dev_addr
);
327 netdev_for_each_uc_addr(ha
, netdev
)
328 mlx5e_add_l2_to_hash(priv
->fs
.l2
.netdev_uc
, ha
->addr
);
330 netdev_for_each_mc_addr(ha
, netdev
)
331 mlx5e_add_l2_to_hash(priv
->fs
.l2
.netdev_mc
, ha
->addr
);
333 netif_addr_unlock_bh(netdev
);
336 static void mlx5e_fill_addr_array(struct mlx5e_priv
*priv
, int list_type
,
337 u8 addr_array
[][ETH_ALEN
], int size
)
339 bool is_uc
= (list_type
== MLX5_NVPRT_LIST_TYPE_UC
);
340 struct net_device
*ndev
= priv
->netdev
;
341 struct mlx5e_l2_hash_node
*hn
;
342 struct hlist_head
*addr_list
;
343 struct hlist_node
*tmp
;
347 addr_list
= is_uc
? priv
->fs
.l2
.netdev_uc
: priv
->fs
.l2
.netdev_mc
;
349 if (is_uc
) /* Make sure our own address is pushed first */
350 ether_addr_copy(addr_array
[i
++], ndev
->dev_addr
);
351 else if (priv
->fs
.l2
.broadcast_enabled
)
352 ether_addr_copy(addr_array
[i
++], ndev
->broadcast
);
354 mlx5e_for_each_hash_node(hn
, tmp
, addr_list
, hi
) {
355 if (ether_addr_equal(ndev
->dev_addr
, hn
->ai
.addr
))
359 ether_addr_copy(addr_array
[i
++], hn
->ai
.addr
);
363 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv
*priv
,
366 bool is_uc
= (list_type
== MLX5_NVPRT_LIST_TYPE_UC
);
367 struct mlx5e_l2_hash_node
*hn
;
368 u8 (*addr_array
)[ETH_ALEN
] = NULL
;
369 struct hlist_head
*addr_list
;
370 struct hlist_node
*tmp
;
376 size
= is_uc
? 0 : (priv
->fs
.l2
.broadcast_enabled
? 1 : 0);
378 1 << MLX5_CAP_GEN(priv
->mdev
, log_max_current_uc_list
) :
379 1 << MLX5_CAP_GEN(priv
->mdev
, log_max_current_mc_list
);
381 addr_list
= is_uc
? priv
->fs
.l2
.netdev_uc
: priv
->fs
.l2
.netdev_mc
;
382 mlx5e_for_each_hash_node(hn
, tmp
, addr_list
, hi
)
385 if (size
> max_size
) {
386 netdev_warn(priv
->netdev
,
387 "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
388 is_uc
? "UC" : "MC", size
, max_size
);
393 addr_array
= kcalloc(size
, ETH_ALEN
, GFP_KERNEL
);
398 mlx5e_fill_addr_array(priv
, list_type
, addr_array
, size
);
401 err
= mlx5_modify_nic_vport_mac_list(priv
->mdev
, list_type
, addr_array
, size
);
404 netdev_err(priv
->netdev
,
405 "Failed to modify vport %s list err(%d)\n",
406 is_uc
? "UC" : "MC", err
);
410 static void mlx5e_vport_context_update(struct mlx5e_priv
*priv
)
412 struct mlx5e_l2_table
*ea
= &priv
->fs
.l2
;
414 mlx5e_vport_context_update_addr_list(priv
, MLX5_NVPRT_LIST_TYPE_UC
);
415 mlx5e_vport_context_update_addr_list(priv
, MLX5_NVPRT_LIST_TYPE_MC
);
416 mlx5_modify_nic_vport_promisc(priv
->mdev
, 0,
417 ea
->allmulti_enabled
,
418 ea
->promisc_enabled
);
421 static void mlx5e_apply_netdev_addr(struct mlx5e_priv
*priv
)
423 struct mlx5e_l2_hash_node
*hn
;
424 struct hlist_node
*tmp
;
427 mlx5e_for_each_hash_node(hn
, tmp
, priv
->fs
.l2
.netdev_uc
, i
)
428 mlx5e_execute_l2_action(priv
, hn
);
430 mlx5e_for_each_hash_node(hn
, tmp
, priv
->fs
.l2
.netdev_mc
, i
)
431 mlx5e_execute_l2_action(priv
, hn
);
434 static void mlx5e_handle_netdev_addr(struct mlx5e_priv
*priv
)
436 struct mlx5e_l2_hash_node
*hn
;
437 struct hlist_node
*tmp
;
440 mlx5e_for_each_hash_node(hn
, tmp
, priv
->fs
.l2
.netdev_uc
, i
)
441 hn
->action
= MLX5E_ACTION_DEL
;
442 mlx5e_for_each_hash_node(hn
, tmp
, priv
->fs
.l2
.netdev_mc
, i
)
443 hn
->action
= MLX5E_ACTION_DEL
;
445 if (!test_bit(MLX5E_STATE_DESTROYING
, &priv
->state
))
446 mlx5e_sync_netdev_addr(priv
);
448 mlx5e_apply_netdev_addr(priv
);
451 void mlx5e_set_rx_mode_work(struct work_struct
*work
)
453 struct mlx5e_priv
*priv
= container_of(work
, struct mlx5e_priv
,
456 struct mlx5e_l2_table
*ea
= &priv
->fs
.l2
;
457 struct net_device
*ndev
= priv
->netdev
;
459 bool rx_mode_enable
= !test_bit(MLX5E_STATE_DESTROYING
, &priv
->state
);
460 bool promisc_enabled
= rx_mode_enable
&& (ndev
->flags
& IFF_PROMISC
);
461 bool allmulti_enabled
= rx_mode_enable
&& (ndev
->flags
& IFF_ALLMULTI
);
462 bool broadcast_enabled
= rx_mode_enable
;
464 bool enable_promisc
= !ea
->promisc_enabled
&& promisc_enabled
;
465 bool disable_promisc
= ea
->promisc_enabled
&& !promisc_enabled
;
466 bool enable_allmulti
= !ea
->allmulti_enabled
&& allmulti_enabled
;
467 bool disable_allmulti
= ea
->allmulti_enabled
&& !allmulti_enabled
;
468 bool enable_broadcast
= !ea
->broadcast_enabled
&& broadcast_enabled
;
469 bool disable_broadcast
= ea
->broadcast_enabled
&& !broadcast_enabled
;
471 if (enable_promisc
) {
472 mlx5e_add_l2_flow_rule(priv
, &ea
->promisc
, MLX5E_PROMISC
);
473 if (!priv
->fs
.vlan
.filter_disabled
)
474 mlx5e_add_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_ANY_VID
,
478 mlx5e_add_l2_flow_rule(priv
, &ea
->allmulti
, MLX5E_ALLMULTI
);
479 if (enable_broadcast
)
480 mlx5e_add_l2_flow_rule(priv
, &ea
->broadcast
, MLX5E_FULLMATCH
);
482 mlx5e_handle_netdev_addr(priv
);
484 if (disable_broadcast
)
485 mlx5e_del_l2_flow_rule(priv
, &ea
->broadcast
);
486 if (disable_allmulti
)
487 mlx5e_del_l2_flow_rule(priv
, &ea
->allmulti
);
488 if (disable_promisc
) {
489 if (!priv
->fs
.vlan
.filter_disabled
)
490 mlx5e_del_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_ANY_VID
,
492 mlx5e_del_l2_flow_rule(priv
, &ea
->promisc
);
495 ea
->promisc_enabled
= promisc_enabled
;
496 ea
->allmulti_enabled
= allmulti_enabled
;
497 ea
->broadcast_enabled
= broadcast_enabled
;
499 mlx5e_vport_context_update(priv
);
502 static void mlx5e_destroy_groups(struct mlx5e_flow_table
*ft
)
506 for (i
= ft
->num_groups
- 1; i
>= 0; i
--) {
507 if (!IS_ERR_OR_NULL(ft
->g
[i
]))
508 mlx5_destroy_flow_group(ft
->g
[i
]);
514 void mlx5e_init_l2_addr(struct mlx5e_priv
*priv
)
516 ether_addr_copy(priv
->fs
.l2
.broadcast
.addr
, priv
->netdev
->broadcast
);
519 void mlx5e_destroy_flow_table(struct mlx5e_flow_table
*ft
)
521 mlx5e_destroy_groups(ft
);
523 mlx5_destroy_flow_table(ft
->t
);
527 static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table
*ttc
)
531 for (i
= 0; i
< MLX5E_NUM_TT
; i
++) {
532 if (!IS_ERR_OR_NULL(ttc
->rules
[i
])) {
533 mlx5_del_flow_rule(ttc
->rules
[i
]);
534 ttc
->rules
[i
] = NULL
;
543 [MLX5E_TT_IPV4_TCP
] = {
545 .proto
= IPPROTO_TCP
,
547 [MLX5E_TT_IPV6_TCP
] = {
549 .proto
= IPPROTO_TCP
,
551 [MLX5E_TT_IPV4_UDP
] = {
553 .proto
= IPPROTO_UDP
,
555 [MLX5E_TT_IPV6_UDP
] = {
557 .proto
= IPPROTO_UDP
,
559 [MLX5E_TT_IPV4_IPSEC_AH
] = {
563 [MLX5E_TT_IPV6_IPSEC_AH
] = {
567 [MLX5E_TT_IPV4_IPSEC_ESP
] = {
569 .proto
= IPPROTO_ESP
,
571 [MLX5E_TT_IPV6_IPSEC_ESP
] = {
573 .proto
= IPPROTO_ESP
,
589 static struct mlx5_flow_rule
*mlx5e_generate_ttc_rule(struct mlx5e_priv
*priv
,
590 struct mlx5_flow_table
*ft
,
591 struct mlx5_flow_destination
*dest
,
595 struct mlx5_flow_rule
*rule
;
596 struct mlx5_flow_spec
*spec
;
599 spec
= mlx5_vzalloc(sizeof(*spec
));
601 netdev_err(priv
->netdev
, "%s: alloc failed\n", __func__
);
602 return ERR_PTR(-ENOMEM
);
606 spec
->match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
607 MLX5_SET_TO_ONES(fte_match_param
, spec
->match_criteria
, outer_headers
.ip_protocol
);
608 MLX5_SET(fte_match_param
, spec
->match_value
, outer_headers
.ip_protocol
, proto
);
611 spec
->match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
612 MLX5_SET_TO_ONES(fte_match_param
, spec
->match_criteria
, outer_headers
.ethertype
);
613 MLX5_SET(fte_match_param
, spec
->match_value
, outer_headers
.ethertype
, etype
);
616 rule
= mlx5_add_flow_rule(ft
, spec
,
617 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
,
618 MLX5_FS_DEFAULT_FLOW_TAG
,
622 netdev_err(priv
->netdev
, "%s: add rule failed\n", __func__
);
626 return err
? ERR_PTR(err
) : rule
;
629 static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv
*priv
)
631 struct mlx5_flow_destination dest
;
632 struct mlx5e_ttc_table
*ttc
;
633 struct mlx5_flow_rule
**rules
;
634 struct mlx5_flow_table
*ft
;
642 dest
.type
= MLX5_FLOW_DESTINATION_TYPE_TIR
;
643 for (tt
= 0; tt
< MLX5E_NUM_TT
; tt
++) {
644 if (tt
== MLX5E_TT_ANY
)
645 dest
.tir_num
= priv
->direct_tir
[0].tirn
;
647 dest
.tir_num
= priv
->indir_tir
[tt
].tirn
;
648 rules
[tt
] = mlx5e_generate_ttc_rule(priv
, ft
, &dest
,
650 ttc_rules
[tt
].proto
);
651 if (IS_ERR(rules
[tt
]))
658 err
= PTR_ERR(rules
[tt
]);
660 mlx5e_cleanup_ttc_rules(ttc
);
664 #define MLX5E_TTC_NUM_GROUPS 3
665 #define MLX5E_TTC_GROUP1_SIZE BIT(3)
666 #define MLX5E_TTC_GROUP2_SIZE BIT(1)
667 #define MLX5E_TTC_GROUP3_SIZE BIT(0)
668 #define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
669 MLX5E_TTC_GROUP2_SIZE +\
670 MLX5E_TTC_GROUP3_SIZE)
671 static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table
*ttc
)
673 int inlen
= MLX5_ST_SZ_BYTES(create_flow_group_in
);
674 struct mlx5e_flow_table
*ft
= &ttc
->ft
;
680 ft
->g
= kcalloc(MLX5E_TTC_NUM_GROUPS
,
681 sizeof(*ft
->g
), GFP_KERNEL
);
684 in
= mlx5_vzalloc(inlen
);
691 mc
= MLX5_ADDR_OF(create_flow_group_in
, in
, match_criteria
);
692 MLX5_SET_TO_ONES(fte_match_param
, mc
, outer_headers
.ip_protocol
);
693 MLX5_SET_TO_ONES(fte_match_param
, mc
, outer_headers
.ethertype
);
694 MLX5_SET_CFG(in
, match_criteria_enable
, MLX5_MATCH_OUTER_HEADERS
);
695 MLX5_SET_CFG(in
, start_flow_index
, ix
);
696 ix
+= MLX5E_TTC_GROUP1_SIZE
;
697 MLX5_SET_CFG(in
, end_flow_index
, ix
- 1);
698 ft
->g
[ft
->num_groups
] = mlx5_create_flow_group(ft
->t
, in
);
699 if (IS_ERR(ft
->g
[ft
->num_groups
]))
704 MLX5_SET(fte_match_param
, mc
, outer_headers
.ip_protocol
, 0);
705 MLX5_SET_CFG(in
, start_flow_index
, ix
);
706 ix
+= MLX5E_TTC_GROUP2_SIZE
;
707 MLX5_SET_CFG(in
, end_flow_index
, ix
- 1);
708 ft
->g
[ft
->num_groups
] = mlx5_create_flow_group(ft
->t
, in
);
709 if (IS_ERR(ft
->g
[ft
->num_groups
]))
714 memset(in
, 0, inlen
);
715 MLX5_SET_CFG(in
, start_flow_index
, ix
);
716 ix
+= MLX5E_TTC_GROUP3_SIZE
;
717 MLX5_SET_CFG(in
, end_flow_index
, ix
- 1);
718 ft
->g
[ft
->num_groups
] = mlx5_create_flow_group(ft
->t
, in
);
719 if (IS_ERR(ft
->g
[ft
->num_groups
]))
727 err
= PTR_ERR(ft
->g
[ft
->num_groups
]);
728 ft
->g
[ft
->num_groups
] = NULL
;
734 static void mlx5e_destroy_ttc_table(struct mlx5e_priv
*priv
)
736 struct mlx5e_ttc_table
*ttc
= &priv
->fs
.ttc
;
738 mlx5e_cleanup_ttc_rules(ttc
);
739 mlx5e_destroy_flow_table(&ttc
->ft
);
742 static int mlx5e_create_ttc_table(struct mlx5e_priv
*priv
)
744 struct mlx5e_ttc_table
*ttc
= &priv
->fs
.ttc
;
745 struct mlx5e_flow_table
*ft
= &ttc
->ft
;
748 ft
->t
= mlx5_create_flow_table(priv
->fs
.ns
, MLX5E_NIC_PRIO
,
749 MLX5E_TTC_TABLE_SIZE
, MLX5E_TTC_FT_LEVEL
);
751 err
= PTR_ERR(ft
->t
);
756 err
= mlx5e_create_ttc_table_groups(ttc
);
760 err
= mlx5e_generate_ttc_table_rules(priv
);
766 mlx5e_destroy_flow_table(ft
);
770 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv
*priv
,
771 struct mlx5e_l2_rule
*ai
)
773 if (!IS_ERR_OR_NULL(ai
->rule
)) {
774 mlx5_del_flow_rule(ai
->rule
);
779 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv
*priv
,
780 struct mlx5e_l2_rule
*ai
, int type
)
782 struct mlx5_flow_table
*ft
= priv
->fs
.l2
.ft
.t
;
783 struct mlx5_flow_destination dest
;
784 struct mlx5_flow_spec
*spec
;
789 spec
= mlx5_vzalloc(sizeof(*spec
));
791 netdev_err(priv
->netdev
, "%s: alloc failed\n", __func__
);
795 mc_dmac
= MLX5_ADDR_OF(fte_match_param
, spec
->match_criteria
,
796 outer_headers
.dmac_47_16
);
797 mv_dmac
= MLX5_ADDR_OF(fte_match_param
, spec
->match_value
,
798 outer_headers
.dmac_47_16
);
800 dest
.type
= MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE
;
801 dest
.ft
= priv
->fs
.ttc
.ft
.t
;
804 case MLX5E_FULLMATCH
:
805 spec
->match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
806 eth_broadcast_addr(mc_dmac
);
807 ether_addr_copy(mv_dmac
, ai
->addr
);
811 spec
->match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
820 ai
->rule
= mlx5_add_flow_rule(ft
, spec
,
821 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
,
822 MLX5_FS_DEFAULT_FLOW_TAG
, &dest
);
823 if (IS_ERR(ai
->rule
)) {
824 netdev_err(priv
->netdev
, "%s: add l2 rule(mac:%pM) failed\n",
826 err
= PTR_ERR(ai
->rule
);
835 #define MLX5E_NUM_L2_GROUPS 3
836 #define MLX5E_L2_GROUP1_SIZE BIT(0)
837 #define MLX5E_L2_GROUP2_SIZE BIT(15)
838 #define MLX5E_L2_GROUP3_SIZE BIT(0)
839 #define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
840 MLX5E_L2_GROUP2_SIZE +\
841 MLX5E_L2_GROUP3_SIZE)
842 static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table
*l2_table
)
844 int inlen
= MLX5_ST_SZ_BYTES(create_flow_group_in
);
845 struct mlx5e_flow_table
*ft
= &l2_table
->ft
;
852 ft
->g
= kcalloc(MLX5E_NUM_L2_GROUPS
, sizeof(*ft
->g
), GFP_KERNEL
);
855 in
= mlx5_vzalloc(inlen
);
861 mc
= MLX5_ADDR_OF(create_flow_group_in
, in
, match_criteria
);
862 mc_dmac
= MLX5_ADDR_OF(fte_match_param
, mc
,
863 outer_headers
.dmac_47_16
);
864 /* Flow Group for promiscuous */
865 MLX5_SET_CFG(in
, start_flow_index
, ix
);
866 ix
+= MLX5E_L2_GROUP1_SIZE
;
867 MLX5_SET_CFG(in
, end_flow_index
, ix
- 1);
868 ft
->g
[ft
->num_groups
] = mlx5_create_flow_group(ft
->t
, in
);
869 if (IS_ERR(ft
->g
[ft
->num_groups
]))
870 goto err_destroy_groups
;
873 /* Flow Group for full match */
874 eth_broadcast_addr(mc_dmac
);
875 MLX5_SET_CFG(in
, match_criteria_enable
, MLX5_MATCH_OUTER_HEADERS
);
876 MLX5_SET_CFG(in
, start_flow_index
, ix
);
877 ix
+= MLX5E_L2_GROUP2_SIZE
;
878 MLX5_SET_CFG(in
, end_flow_index
, ix
- 1);
879 ft
->g
[ft
->num_groups
] = mlx5_create_flow_group(ft
->t
, in
);
880 if (IS_ERR(ft
->g
[ft
->num_groups
]))
881 goto err_destroy_groups
;
884 /* Flow Group for allmulti */
885 eth_zero_addr(mc_dmac
);
887 MLX5_SET_CFG(in
, start_flow_index
, ix
);
888 ix
+= MLX5E_L2_GROUP3_SIZE
;
889 MLX5_SET_CFG(in
, end_flow_index
, ix
- 1);
890 ft
->g
[ft
->num_groups
] = mlx5_create_flow_group(ft
->t
, in
);
891 if (IS_ERR(ft
->g
[ft
->num_groups
]))
892 goto err_destroy_groups
;
899 err
= PTR_ERR(ft
->g
[ft
->num_groups
]);
900 ft
->g
[ft
->num_groups
] = NULL
;
901 mlx5e_destroy_groups(ft
);
907 static void mlx5e_destroy_l2_table(struct mlx5e_priv
*priv
)
909 mlx5e_destroy_flow_table(&priv
->fs
.l2
.ft
);
912 static int mlx5e_create_l2_table(struct mlx5e_priv
*priv
)
914 struct mlx5e_l2_table
*l2_table
= &priv
->fs
.l2
;
915 struct mlx5e_flow_table
*ft
= &l2_table
->ft
;
919 ft
->t
= mlx5_create_flow_table(priv
->fs
.ns
, MLX5E_NIC_PRIO
,
920 MLX5E_L2_TABLE_SIZE
, MLX5E_L2_FT_LEVEL
);
923 err
= PTR_ERR(ft
->t
);
928 err
= mlx5e_create_l2_table_groups(l2_table
);
930 goto err_destroy_flow_table
;
934 err_destroy_flow_table
:
935 mlx5_destroy_flow_table(ft
->t
);
941 #define MLX5E_NUM_VLAN_GROUPS 2
942 #define MLX5E_VLAN_GROUP0_SIZE BIT(12)
943 #define MLX5E_VLAN_GROUP1_SIZE BIT(1)
944 #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
945 MLX5E_VLAN_GROUP1_SIZE)
947 static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table
*ft
, u32
*in
,
952 u8
*mc
= MLX5_ADDR_OF(create_flow_group_in
, in
, match_criteria
);
954 memset(in
, 0, inlen
);
955 MLX5_SET_CFG(in
, match_criteria_enable
, MLX5_MATCH_OUTER_HEADERS
);
956 MLX5_SET_TO_ONES(fte_match_param
, mc
, outer_headers
.vlan_tag
);
957 MLX5_SET_TO_ONES(fte_match_param
, mc
, outer_headers
.first_vid
);
958 MLX5_SET_CFG(in
, start_flow_index
, ix
);
959 ix
+= MLX5E_VLAN_GROUP0_SIZE
;
960 MLX5_SET_CFG(in
, end_flow_index
, ix
- 1);
961 ft
->g
[ft
->num_groups
] = mlx5_create_flow_group(ft
->t
, in
);
962 if (IS_ERR(ft
->g
[ft
->num_groups
]))
963 goto err_destroy_groups
;
966 memset(in
, 0, inlen
);
967 MLX5_SET_CFG(in
, match_criteria_enable
, MLX5_MATCH_OUTER_HEADERS
);
968 MLX5_SET_TO_ONES(fte_match_param
, mc
, outer_headers
.vlan_tag
);
969 MLX5_SET_CFG(in
, start_flow_index
, ix
);
970 ix
+= MLX5E_VLAN_GROUP1_SIZE
;
971 MLX5_SET_CFG(in
, end_flow_index
, ix
- 1);
972 ft
->g
[ft
->num_groups
] = mlx5_create_flow_group(ft
->t
, in
);
973 if (IS_ERR(ft
->g
[ft
->num_groups
]))
974 goto err_destroy_groups
;
980 err
= PTR_ERR(ft
->g
[ft
->num_groups
]);
981 ft
->g
[ft
->num_groups
] = NULL
;
982 mlx5e_destroy_groups(ft
);
987 static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table
*ft
)
990 int inlen
= MLX5_ST_SZ_BYTES(create_flow_group_in
);
993 in
= mlx5_vzalloc(inlen
);
997 err
= __mlx5e_create_vlan_table_groups(ft
, in
, inlen
);
1003 static int mlx5e_create_vlan_table(struct mlx5e_priv
*priv
)
1005 struct mlx5e_flow_table
*ft
= &priv
->fs
.vlan
.ft
;
1009 ft
->t
= mlx5_create_flow_table(priv
->fs
.ns
, MLX5E_NIC_PRIO
,
1010 MLX5E_VLAN_TABLE_SIZE
, MLX5E_VLAN_FT_LEVEL
);
1012 if (IS_ERR(ft
->t
)) {
1013 err
= PTR_ERR(ft
->t
);
1017 ft
->g
= kcalloc(MLX5E_NUM_VLAN_GROUPS
, sizeof(*ft
->g
), GFP_KERNEL
);
1020 goto err_destroy_vlan_table
;
1023 err
= mlx5e_create_vlan_table_groups(ft
);
1027 err
= mlx5e_add_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_UNTAGGED
, 0);
1029 goto err_destroy_vlan_flow_groups
;
1033 err_destroy_vlan_flow_groups
:
1034 mlx5e_destroy_groups(ft
);
1037 err_destroy_vlan_table
:
1038 mlx5_destroy_flow_table(ft
->t
);
1044 static void mlx5e_destroy_vlan_table(struct mlx5e_priv
*priv
)
1046 mlx5e_destroy_flow_table(&priv
->fs
.vlan
.ft
);
1049 int mlx5e_create_flow_steering(struct mlx5e_priv
*priv
)
1053 priv
->fs
.ns
= mlx5_get_flow_namespace(priv
->mdev
,
1054 MLX5_FLOW_NAMESPACE_KERNEL
);
1059 err
= mlx5e_arfs_create_tables(priv
);
1061 netdev_err(priv
->netdev
, "Failed to create arfs tables, err=%d\n",
1063 priv
->netdev
->hw_features
&= ~NETIF_F_NTUPLE
;
1066 err
= mlx5e_create_ttc_table(priv
);
1068 netdev_err(priv
->netdev
, "Failed to create ttc table, err=%d\n",
1070 goto err_destroy_arfs_tables
;
1073 err
= mlx5e_create_l2_table(priv
);
1075 netdev_err(priv
->netdev
, "Failed to create l2 table, err=%d\n",
1077 goto err_destroy_ttc_table
;
1080 err
= mlx5e_create_vlan_table(priv
);
1082 netdev_err(priv
->netdev
, "Failed to create vlan table, err=%d\n",
1084 goto err_destroy_l2_table
;
1087 mlx5e_ethtool_init_steering(priv
);
1091 err_destroy_l2_table
:
1092 mlx5e_destroy_l2_table(priv
);
1093 err_destroy_ttc_table
:
1094 mlx5e_destroy_ttc_table(priv
);
1095 err_destroy_arfs_tables
:
1096 mlx5e_arfs_destroy_tables(priv
);
1101 void mlx5e_destroy_flow_steering(struct mlx5e_priv
*priv
)
1103 mlx5e_del_vlan_rule(priv
, MLX5E_VLAN_RULE_TYPE_UNTAGGED
, 0);
1104 mlx5e_destroy_vlan_table(priv
);
1105 mlx5e_destroy_l2_table(priv
);
1106 mlx5e_destroy_ttc_table(priv
);
1107 mlx5e_arfs_destroy_tables(priv
);
1108 mlx5e_ethtool_cleanup_steering(priv
);