/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #ifndef __MLX5_ESWITCH_H__
34 #define __MLX5_ESWITCH_H__
36 #include <linux/if_ether.h>
37 #include <linux/if_link.h>
38 #include <net/devlink.h>
39 #include <linux/mlx5/device.h>
/* Max unicast/multicast L2 addresses a vport may own, read from HCA caps */
#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

/* 256 buckets; hash key is simply the last byte of the MAC address */
#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE))
#define MLX5_L2_ADDR_HASH(addr) (addr[5])

/* Pseudo vport number used to address the uplink in FDB rules */
#define FDB_UPLINK_VPORT 0xffff
52 /* L2 -mac address based- hash helpers */
54 struct hlist_node hlist
;
/* Walk every node of an L2-address hash table; removal-safe variant */
#define for_each_l2hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
/* Look up @mac in @hash; evaluates to a type* on hit, NULL on miss.
 * NOTE(review): the found/ptr locals and the macro tail were lost in this
 * copy; restored to match upstream mlx5 eswitch.h — verify against tree.
 */
#define l2addr_hash_find(hash, mac, type) ({                \
	int ix = MLX5_L2_ADDR_HASH(mac);                    \
	bool found = false;                                 \
	type *ptr = NULL;                                   \
							    \
	hlist_for_each_entry(ptr, &hash[ix], node.hlist)    \
		if (ether_addr_equal(ptr->node.addr, mac)) {\
			found = true;                       \
			break;                              \
		}                                           \
	if (!found)                                         \
		ptr = NULL;                                 \
	ptr;                                                \
})
/* Allocate a zeroed type, key it on @mac and insert it into @hash;
 * evaluates to the new node, or NULL on allocation failure.
 * NOTE(review): the ptr declaration and macro tail were lost in this copy;
 * restored to match upstream mlx5 eswitch.h — verify against tree.
 */
#define l2addr_hash_add(hash, mac, type, gfp) ({            \
	int ix = MLX5_L2_ADDR_HASH(mac);                    \
	type *ptr = NULL;                                   \
							    \
	ptr = kzalloc(sizeof(type), gfp);                   \
	if (ptr) {                                          \
		ether_addr_copy(ptr->node.addr, mac);       \
		hlist_add_head(&ptr->node.hlist, &hash[ix]);\
	}                                                   \
	ptr;                                                \
})
/* Unlink @ptr from its hash chain and free it.
 * NOTE(review): kfree and closing were lost in this copy; restored to match
 * upstream mlx5 eswitch.h — verify against tree.
 */
#define l2addr_hash_del(ptr) ({                             \
	hlist_del(&ptr->node.hlist);                        \
	kfree(ptr);                                         \
})
/* Per-vport ingress ACL: flow table, its groups, and the currently
 * installed allow/drop rules (closing brace restored — lost in this copy).
 */
struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *allow_untagged_spoofchk_grp;
	struct mlx5_flow_group *allow_spoofchk_only_grp;
	struct mlx5_flow_group *allow_untagged_only_grp;
	struct mlx5_flow_group *drop_grp;
	struct mlx5_flow_rule  *allow_rule;
	struct mlx5_flow_rule  *drop_rule;
};
/* Per-vport egress ACL: flow table, its groups, and the currently
 * installed allowed-vlan/drop rules (closing brace restored — lost in
 * this copy).
 */
struct vport_egress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *allowed_vlans_grp;
	struct mlx5_flow_group *drop_grp;
	struct mlx5_flow_rule  *allowed_vlan;
	struct mlx5_flow_rule  *drop_rule;
};
113 struct mlx5_core_dev
*dev
;
115 struct hlist_head uc_list
[MLX5_L2_ADDR_HASH_SIZE
];
116 struct hlist_head mc_list
[MLX5_L2_ADDR_HASH_SIZE
];
117 struct mlx5_flow_rule
*promisc_rule
;
118 struct mlx5_flow_rule
*allmulti_rule
;
119 struct work_struct vport_change_handler
;
121 struct vport_ingress ingress
;
122 struct vport_egress egress
;
132 struct mlx5_l2_table
{
133 struct hlist_head l2_hash
[MLX5_L2_ADDR_HASH_SIZE
];
135 unsigned long *bitmap
;
/* E-Switch forwarding database. The union holds the mode-specific groups:
 * legacy (addr/allmulti/promisc) vs. offloads (send-to-vport/miss).
 * NOTE(review): the fdb pointer, the union/legacy_fdb wrappers and all
 * closing braces were lost in this copy; restored to match upstream mlx5
 * eswitch.h — verify against tree.
 */
struct mlx5_eswitch_fdb {
	void *fdb;
	union {
		struct legacy_fdb {
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_rule  *miss_rule;
		} offloads;
	};
};
163 struct mlx5_flow_rule
*send_to_vport_rule
;
164 struct list_head list
;
167 struct mlx5_eswitch_rep
{
168 int (*load
)(struct mlx5_eswitch
*esw
,
169 struct mlx5_eswitch_rep
*rep
);
170 void (*unload
)(struct mlx5_eswitch
*esw
,
171 struct mlx5_eswitch_rep
*rep
);
173 struct mlx5_flow_rule
*vport_rx_rule
;
175 struct list_head vport_sqs_list
;
/* Offloads-mode state: the offloads flow table, its vport-RX group, and
 * the per-vport representor array (closing brace restored — lost in this
 * copy).
 */
struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_eswitch_rep *vport_reps;
};
186 struct mlx5_eswitch
{
187 struct mlx5_core_dev
*dev
;
188 struct mlx5_l2_table l2_table
;
189 struct mlx5_eswitch_fdb fdb_table
;
190 struct hlist_head mc_table
[MLX5_L2_ADDR_HASH_SIZE
];
191 struct workqueue_struct
*work_queue
;
192 struct mlx5_vport
*vports
;
195 /* Synchronize between vport change events
196 * and async SRIOV admin state changes
198 struct mutex state_lock
;
199 struct esw_mc_addr
*mc_promisc
;
200 struct mlx5_esw_offload offloads
;
/* E-Switch lifecycle and SR-IOV control (implemented in eswitch.c) */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
/* Dispatch an async vport-change EQE to the owning vport's work handler */
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
210 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch
*esw
,
211 int vport
, u8 mac
[ETH_ALEN
]);
212 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch
*esw
,
213 int vport
, int link_state
);
214 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch
*esw
,
215 int vport
, u16 vlan
, u8 qos
);
216 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch
*esw
,
217 int vport
, bool spoofchk
);
218 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch
*esw
,
219 int vport_num
, bool setting
);
220 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch
*esw
,
221 int vport
, struct ifla_vf_info
*ivi
);
222 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch
*esw
,
224 struct ifla_vf_stats
*vf_stats
);
struct mlx5_flow_spec;

/* Offloads-mode FDB rule management (implemented in eswitch_offloads.c) */
struct mlx5_flow_rule *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				u32 action, u32 src_vport, u32 dst_vport);
/* Steer traffic for @vport from the offloads table to TIR @tirn */
struct mlx5_flow_rule *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);

/* Map/unmap a representor's send queues to its vport */
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num);
void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep);
241 int mlx5_devlink_eswitch_mode_set(struct devlink
*devlink
, u16 mode
);
242 int mlx5_devlink_eswitch_mode_get(struct devlink
*devlink
, u16
*mode
);
243 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch
*esw
,
244 struct mlx5_eswitch_rep
*rep
);
245 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch
*esw
,
/* Bit in the mlx5_core debug mask that gates esw_debug() output */
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

/* E-Switch-prefixed logging helpers, tagged with the device name */
#define esw_info(dev, format, ...) \
	pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_warn(dev, format, ...) \
	pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

#endif /* __MLX5_ESWITCH_H__ */