Commit | Line | Data |
---|---|---|
afb736e9 AV |
1 | /* |
2 | * Copyright (c) 2015, Mellanox Technologies. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
39 | ||
/* Shorthand for setting a field in a create_flow_group_in command layout. */
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
afb736e9 AV |
/* How broadly an eth-addr steering rule matches. */
enum {
	MLX5E_FULLMATCH = 0,	/* match one exact destination MAC */
	MLX5E_ALLMULTI = 1,	/* match any multicast destination */
	MLX5E_PROMISC = 2,	/* match all traffic */
};

/* Classification of a destination MAC address. */
enum {
	MLX5E_UC = 0,		/* unicast */
	MLX5E_MC_IPV4 = 1,	/* 01:00:5e:0x:xx:xx IPv4 multicast */
	MLX5E_MC_IPV6 = 2,	/* 33:33:xx:xx:xx:xx IPv6 multicast */
	MLX5E_MC_OTHER = 3,	/* any other multicast */
};

/* Pending action for an address tracked in the hash tables below. */
enum {
	MLX5E_ACTION_NONE = 0,	/* already in sync, nothing to do */
	MLX5E_ACTION_ADD = 1,	/* install steering rules */
	MLX5E_ACTION_DEL = 2,	/* remove rules and drop the node */
};
60 | ||
/* Hash-table node tracking one netdev address and its pending action. */
struct mlx5e_eth_addr_hash_node {
	struct hlist_node hlist;
	u8 action;			/* MLX5E_ACTION_* */
	struct mlx5e_eth_addr_info ai;	/* address + installed rules */
};
66 | ||
67 | static inline int mlx5e_hash_eth_addr(u8 *addr) | |
68 | { | |
69 | return addr[5]; | |
70 | } | |
71 | ||
72 | static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr) | |
73 | { | |
74 | struct mlx5e_eth_addr_hash_node *hn; | |
75 | int ix = mlx5e_hash_eth_addr(addr); | |
76 | int found = 0; | |
77 | ||
78 | hlist_for_each_entry(hn, &hash[ix], hlist) | |
79 | if (ether_addr_equal_64bits(hn->ai.addr, addr)) { | |
80 | found = 1; | |
81 | break; | |
82 | } | |
83 | ||
84 | if (found) { | |
85 | hn->action = MLX5E_ACTION_NONE; | |
86 | return; | |
87 | } | |
88 | ||
89 | hn = kzalloc(sizeof(*hn), GFP_ATOMIC); | |
90 | if (!hn) | |
91 | return; | |
92 | ||
93 | ether_addr_copy(hn->ai.addr, addr); | |
94 | hn->action = MLX5E_ACTION_ADD; | |
95 | ||
96 | hlist_add_head(&hn->hlist, &hash[ix]); | |
97 | } | |
98 | ||
/* Unlink a tracked address node from its hash bucket and free it. */
static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
104 | ||
105 | static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, | |
106 | struct mlx5e_eth_addr_info *ai) | |
107 | { | |
a741749f | 108 | if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) |
86d722ad | 109 | mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]); |
a741749f AS |
110 | |
111 | if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) | |
86d722ad | 112 | mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]); |
a741749f AS |
113 | |
114 | if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) | |
86d722ad | 115 | mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]); |
a741749f AS |
116 | |
117 | if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) | |
86d722ad | 118 | mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]); |
a741749f | 119 | |
5a6f8aef | 120 | if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP)) |
86d722ad | 121 | mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]); |
afb736e9 | 122 | |
5a6f8aef | 123 | if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP)) |
86d722ad | 124 | mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]); |
afb736e9 | 125 | |
5a6f8aef | 126 | if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP)) |
86d722ad | 127 | mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]); |
afb736e9 | 128 | |
5a6f8aef | 129 | if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP)) |
86d722ad | 130 | mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]); |
afb736e9 | 131 | |
5a6f8aef | 132 | if (ai->tt_vec & BIT(MLX5E_TT_IPV6)) |
86d722ad | 133 | mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]); |
afb736e9 | 134 | |
5a6f8aef | 135 | if (ai->tt_vec & BIT(MLX5E_TT_IPV4)) |
86d722ad | 136 | mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]); |
afb736e9 | 137 | |
5a6f8aef | 138 | if (ai->tt_vec & BIT(MLX5E_TT_ANY)) |
86d722ad | 139 | mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]); |
afb736e9 AV |
140 | } |
141 | ||
142 | static int mlx5e_get_eth_addr_type(u8 *addr) | |
143 | { | |
144 | if (is_unicast_ether_addr(addr)) | |
145 | return MLX5E_UC; | |
146 | ||
147 | if ((addr[0] == 0x01) && | |
148 | (addr[1] == 0x00) && | |
149 | (addr[2] == 0x5e) && | |
150 | !(addr[3] & 0x80)) | |
151 | return MLX5E_MC_IPV4; | |
152 | ||
153 | if ((addr[0] == 0x33) && | |
154 | (addr[1] == 0x33)) | |
155 | return MLX5E_MC_IPV6; | |
156 | ||
157 | return MLX5E_MC_OTHER; | |
158 | } | |
159 | ||
160 | static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) | |
161 | { | |
162 | int eth_addr_type; | |
163 | u32 ret; | |
164 | ||
165 | switch (type) { | |
166 | case MLX5E_FULLMATCH: | |
167 | eth_addr_type = mlx5e_get_eth_addr_type(ai->addr); | |
168 | switch (eth_addr_type) { | |
169 | case MLX5E_UC: | |
170 | ret = | |
5a6f8aef AS |
171 | BIT(MLX5E_TT_IPV4_TCP) | |
172 | BIT(MLX5E_TT_IPV6_TCP) | | |
173 | BIT(MLX5E_TT_IPV4_UDP) | | |
174 | BIT(MLX5E_TT_IPV6_UDP) | | |
a741749f AS |
175 | BIT(MLX5E_TT_IPV4_IPSEC_AH) | |
176 | BIT(MLX5E_TT_IPV6_IPSEC_AH) | | |
177 | BIT(MLX5E_TT_IPV4_IPSEC_ESP) | | |
178 | BIT(MLX5E_TT_IPV6_IPSEC_ESP) | | |
5a6f8aef AS |
179 | BIT(MLX5E_TT_IPV4) | |
180 | BIT(MLX5E_TT_IPV6) | | |
181 | BIT(MLX5E_TT_ANY) | | |
afb736e9 AV |
182 | 0; |
183 | break; | |
184 | ||
185 | case MLX5E_MC_IPV4: | |
186 | ret = | |
5a6f8aef AS |
187 | BIT(MLX5E_TT_IPV4_UDP) | |
188 | BIT(MLX5E_TT_IPV4) | | |
afb736e9 AV |
189 | 0; |
190 | break; | |
191 | ||
192 | case MLX5E_MC_IPV6: | |
193 | ret = | |
5a6f8aef AS |
194 | BIT(MLX5E_TT_IPV6_UDP) | |
195 | BIT(MLX5E_TT_IPV6) | | |
afb736e9 AV |
196 | 0; |
197 | break; | |
198 | ||
199 | case MLX5E_MC_OTHER: | |
200 | ret = | |
5a6f8aef | 201 | BIT(MLX5E_TT_ANY) | |
afb736e9 AV |
202 | 0; |
203 | break; | |
204 | } | |
205 | ||
206 | break; | |
207 | ||
208 | case MLX5E_ALLMULTI: | |
209 | ret = | |
5a6f8aef AS |
210 | BIT(MLX5E_TT_IPV4_UDP) | |
211 | BIT(MLX5E_TT_IPV6_UDP) | | |
212 | BIT(MLX5E_TT_IPV4) | | |
213 | BIT(MLX5E_TT_IPV6) | | |
214 | BIT(MLX5E_TT_ANY) | | |
afb736e9 AV |
215 | 0; |
216 | break; | |
217 | ||
218 | default: /* MLX5E_PROMISC */ | |
219 | ret = | |
5a6f8aef AS |
220 | BIT(MLX5E_TT_IPV4_TCP) | |
221 | BIT(MLX5E_TT_IPV6_TCP) | | |
222 | BIT(MLX5E_TT_IPV4_UDP) | | |
223 | BIT(MLX5E_TT_IPV6_UDP) | | |
a741749f AS |
224 | BIT(MLX5E_TT_IPV4_IPSEC_AH) | |
225 | BIT(MLX5E_TT_IPV6_IPSEC_AH) | | |
226 | BIT(MLX5E_TT_IPV4_IPSEC_ESP) | | |
227 | BIT(MLX5E_TT_IPV6_IPSEC_ESP) | | |
5a6f8aef AS |
228 | BIT(MLX5E_TT_IPV4) | |
229 | BIT(MLX5E_TT_IPV6) | | |
230 | BIT(MLX5E_TT_ANY) | | |
afb736e9 AV |
231 | 0; |
232 | break; | |
233 | } | |
234 | ||
235 | return ret; | |
236 | } | |
237 | ||
/* Install one steering rule per traffic type in @ai's tt_vec into the
 * main flow table, each forwarding to the matching TIR.
 *
 * @mc/@mv are caller-zeroed match-criteria / match-value buffers.  They
 * are mutated progressively as the function walks from the least
 * specific match (TT_ANY: L2 only) through ethertype, then UDP, TCP,
 * AH and ESP ip_protocol values — the section order below is
 * load-bearing and must not be changed.
 *
 * Returns 0 on success.  On failure, rules added so far are removed
 * and the error of the failing mlx5_add_flow_rule() is returned.
 */
static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
				     struct mlx5e_eth_addr_info *ai,
				     int type, u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest;
	u8 match_criteria_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
				   outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
				   outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	/* Set up the destination-MAC part of the match. */
	switch (type) {
	case MLX5E_FULLMATCH:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);	/* full 48-bit DMAC mask */
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;	/* match only the multicast bit */
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		/* empty criteria: match everything */
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	/* TT_ANY: L2-only match, no ethertype constraint yet. */
	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	/* From here on all rules also match on ethertype. */
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	/* Add ip_protocol to the criteria; value starts as UDP. */
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	/* Switch the ip_protocol value to TCP. */
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	/* Switch the ip_protocol value to IPsec AH. */
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	/* Switch the ip_protocol value to IPsec ESP. */
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return 0;

err_del_ai:
	/* *rule_p is the rule that just failed; everything already added
	 * (recorded in ai->tt_vec) is rolled back.
	 */
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return err;
}
438 | ||
439 | static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, | |
440 | struct mlx5e_eth_addr_info *ai, int type) | |
441 | { | |
afb736e9 | 442 | u32 *match_criteria; |
86d722ad MG |
443 | u32 *match_value; |
444 | int err = 0; | |
afb736e9 | 445 | |
86d722ad MG |
446 | match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); |
447 | match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); | |
448 | if (!match_value || !match_criteria) { | |
afb736e9 AV |
449 | netdev_err(priv->netdev, "%s: alloc failed\n", __func__); |
450 | err = -ENOMEM; | |
451 | goto add_eth_addr_rule_out; | |
452 | } | |
453 | ||
86d722ad MG |
454 | err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria, |
455 | match_value); | |
afb736e9 AV |
456 | |
457 | add_eth_addr_rule_out: | |
458 | kvfree(match_criteria); | |
86d722ad MG |
459 | kvfree(match_value); |
460 | ||
afb736e9 AV |
461 | return err; |
462 | } | |
463 | ||
aad9e6e4 SM |
464 | static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) |
465 | { | |
466 | struct net_device *ndev = priv->netdev; | |
467 | int max_list_size; | |
468 | int list_size; | |
469 | u16 *vlans; | |
470 | int vlan; | |
471 | int err; | |
472 | int i; | |
473 | ||
474 | list_size = 0; | |
475 | for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) | |
476 | list_size++; | |
477 | ||
478 | max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); | |
479 | ||
480 | if (list_size > max_list_size) { | |
481 | netdev_warn(ndev, | |
482 | "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n", | |
483 | list_size, max_list_size); | |
484 | list_size = max_list_size; | |
485 | } | |
486 | ||
487 | vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL); | |
488 | if (!vlans) | |
489 | return -ENOMEM; | |
490 | ||
491 | i = 0; | |
492 | for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) { | |
493 | if (i >= list_size) | |
494 | break; | |
495 | vlans[i++] = vlan; | |
496 | } | |
497 | ||
498 | err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size); | |
499 | if (err) | |
500 | netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n", | |
501 | err); | |
502 | ||
503 | kfree(vlans); | |
504 | return err; | |
505 | } | |
506 | ||
afb736e9 AV |
/* Kinds of VLAN steering rules the driver installs. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,	/* frames with no VLAN tag */
	MLX5E_VLAN_RULE_TYPE_ANY_VID,	/* any tagged frame */
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,	/* one specific VID */
};
512 | ||
86d722ad MG |
513 | static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, |
514 | enum mlx5e_vlan_rule_type rule_type, | |
515 | u16 vid, u32 *mc, u32 *mv) | |
afb736e9 | 516 | { |
86d722ad MG |
517 | struct mlx5_flow_table *ft = priv->fts.vlan.t; |
518 | struct mlx5_flow_destination dest; | |
afb736e9 | 519 | u8 match_criteria_enable = 0; |
86d722ad MG |
520 | struct mlx5_flow_rule **rule_p; |
521 | int err = 0; | |
afb736e9 | 522 | |
86d722ad MG |
523 | dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; |
524 | dest.ft = priv->fts.main.t; | |
afb736e9 AV |
525 | |
526 | match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; | |
86d722ad | 527 | MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); |
afb736e9 AV |
528 | |
529 | switch (rule_type) { | |
530 | case MLX5E_VLAN_RULE_TYPE_UNTAGGED: | |
86d722ad | 531 | rule_p = &priv->vlan.untagged_rule; |
afb736e9 AV |
532 | break; |
533 | case MLX5E_VLAN_RULE_TYPE_ANY_VID: | |
86d722ad MG |
534 | rule_p = &priv->vlan.any_vlan_rule; |
535 | MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); | |
afb736e9 AV |
536 | break; |
537 | default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ | |
86d722ad MG |
538 | rule_p = &priv->vlan.active_vlans_rule[vid]; |
539 | MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); | |
540 | MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); | |
541 | MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid); | |
afb736e9 AV |
542 | break; |
543 | } | |
544 | ||
86d722ad MG |
545 | *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, |
546 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, | |
547 | MLX5_FS_DEFAULT_FLOW_TAG, | |
548 | &dest); | |
549 | ||
550 | if (IS_ERR(*rule_p)) { | |
551 | err = PTR_ERR(*rule_p); | |
552 | *rule_p = NULL; | |
553 | netdev_err(priv->netdev, "%s: add rule failed\n", __func__); | |
554 | } | |
555 | ||
556 | return err; | |
557 | } | |
558 | ||
559 | static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, | |
560 | enum mlx5e_vlan_rule_type rule_type, u16 vid) | |
561 | { | |
562 | u32 *match_criteria; | |
563 | u32 *match_value; | |
564 | int err = 0; | |
565 | ||
566 | match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); | |
567 | match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); | |
568 | if (!match_value || !match_criteria) { | |
569 | netdev_err(priv->netdev, "%s: alloc failed\n", __func__); | |
570 | err = -ENOMEM; | |
571 | goto add_vlan_rule_out; | |
572 | } | |
573 | ||
574 | if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID) | |
575 | mlx5e_vport_context_update_vlans(priv); | |
576 | ||
577 | err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria, | |
578 | match_value); | |
afb736e9 AV |
579 | |
580 | add_vlan_rule_out: | |
581 | kvfree(match_criteria); | |
86d722ad MG |
582 | kvfree(match_value); |
583 | ||
afb736e9 AV |
584 | return err; |
585 | } | |
586 | ||
/* Remove a previously installed VLAN steering rule, if present.
 *
 * NOTE(review): for MATCH_VID the vport VLAN list is refreshed both
 * before and after the rule removal; the double call looks redundant —
 * presumably it keeps the e-switch view in sync across the removal
 * window.  Confirm against the vport context requirements.
 */
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->vlan.untagged_rule) {
			mlx5_del_flow_rule(priv->vlan.untagged_rule);
			priv->vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		if (priv->vlan.any_vlan_rule) {
			mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
			priv->vlan.any_vlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5e_vport_context_update_vlans(priv);
		if (priv->vlan.active_vlans_rule[vid]) {
			mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
			priv->vlan.active_vlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}
613 | ||
614 | void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) | |
615 | { | |
9b37b07f AS |
616 | if (!priv->vlan.filter_disabled) |
617 | return; | |
afb736e9 | 618 | |
9b37b07f | 619 | priv->vlan.filter_disabled = false; |
c0754343 AS |
620 | if (priv->netdev->flags & IFF_PROMISC) |
621 | return; | |
9b37b07f | 622 | mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); |
afb736e9 AV |
623 | } |
624 | ||
625 | void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) | |
626 | { | |
9b37b07f AS |
627 | if (priv->vlan.filter_disabled) |
628 | return; | |
afb736e9 | 629 | |
9b37b07f | 630 | priv->vlan.filter_disabled = true; |
c0754343 AS |
631 | if (priv->netdev->flags & IFF_PROMISC) |
632 | return; | |
9b37b07f | 633 | mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); |
afb736e9 AV |
634 | } |
635 | ||
/* ndo_vlan_rx_add_vid: record the VID as active and install its
 * per-VID steering rule.  Returns 0 or a negative errno.
 */
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
645 | ||
/* ndo_vlan_rx_kill_vid: drop the VID from the active set and remove
 * its steering rule.  Always returns 0.
 */
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}
657 | ||
afb736e9 AV |
/* Iterate over every node of an eth-addr hash table; uses the _safe
 * hlist walker so the current node may be removed during iteration.
 */
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
661 | ||
662 | static void mlx5e_execute_action(struct mlx5e_priv *priv, | |
663 | struct mlx5e_eth_addr_hash_node *hn) | |
664 | { | |
665 | switch (hn->action) { | |
666 | case MLX5E_ACTION_ADD: | |
667 | mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); | |
668 | hn->action = MLX5E_ACTION_NONE; | |
669 | break; | |
670 | ||
671 | case MLX5E_ACTION_DEL: | |
672 | mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); | |
673 | mlx5e_del_eth_addr_from_hash(hn); | |
674 | break; | |
675 | } | |
676 | } | |
677 | ||
678 | static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) | |
679 | { | |
680 | struct net_device *netdev = priv->netdev; | |
681 | struct netdev_hw_addr *ha; | |
682 | ||
683 | netif_addr_lock_bh(netdev); | |
684 | ||
685 | mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, | |
686 | priv->netdev->dev_addr); | |
687 | ||
688 | netdev_for_each_uc_addr(ha, netdev) | |
689 | mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr); | |
690 | ||
691 | netdev_for_each_mc_addr(ha, netdev) | |
692 | mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr); | |
693 | ||
694 | netif_addr_unlock_bh(netdev); | |
695 | } | |
696 | ||
5e55da1d SM |
697 | static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, |
698 | u8 addr_array[][ETH_ALEN], int size) | |
699 | { | |
700 | bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); | |
701 | struct net_device *ndev = priv->netdev; | |
702 | struct mlx5e_eth_addr_hash_node *hn; | |
703 | struct hlist_head *addr_list; | |
704 | struct hlist_node *tmp; | |
705 | int i = 0; | |
706 | int hi; | |
707 | ||
708 | addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc; | |
709 | ||
710 | if (is_uc) /* Make sure our own address is pushed first */ | |
711 | ether_addr_copy(addr_array[i++], ndev->dev_addr); | |
712 | else if (priv->eth_addr.broadcast_enabled) | |
713 | ether_addr_copy(addr_array[i++], ndev->broadcast); | |
714 | ||
715 | mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) { | |
716 | if (ether_addr_equal(ndev->dev_addr, hn->ai.addr)) | |
717 | continue; | |
718 | if (i >= size) | |
719 | break; | |
720 | ether_addr_copy(addr_array[i++], hn->ai.addr); | |
721 | } | |
722 | } | |
723 | ||
/* Push the tracked UC or MC address list into the NIC vport context.
 * If the list exceeds the device capability (log_max_current_*_list)
 * it is truncated with a warning.  Failures are logged, not returned.
 */
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	/* The MC list reserves one slot for broadcast when enabled;
	 * the UC list's own-MAC slot is counted by the hash walk below.
	 */
	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	/* size == 0 is valid: it clears the vport list. */
	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}
770 | ||
/* Re-sync all vport-context state: UC list, MC list and the
 * promisc/allmulti flags.  NOTE(review): the first argument to
 * mlx5_modify_nic_vport_promisc() is passed as 0 here — presumably a
 * separate promisc mode that this driver never enables; confirm
 * against that function's definition.
 */
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}
781 | ||
afb736e9 AV |
782 | static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) |
783 | { | |
784 | struct mlx5e_eth_addr_hash_node *hn; | |
785 | struct hlist_node *tmp; | |
786 | int i; | |
787 | ||
788 | mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) | |
789 | mlx5e_execute_action(priv, hn); | |
790 | ||
791 | mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) | |
792 | mlx5e_execute_action(priv, hn); | |
793 | } | |
794 | ||
795 | static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) | |
796 | { | |
797 | struct mlx5e_eth_addr_hash_node *hn; | |
798 | struct hlist_node *tmp; | |
799 | int i; | |
800 | ||
801 | mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) | |
802 | hn->action = MLX5E_ACTION_DEL; | |
803 | mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) | |
804 | hn->action = MLX5E_ACTION_DEL; | |
805 | ||
9b37b07f | 806 | if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) |
afb736e9 AV |
807 | mlx5e_sync_netdev_addr(priv); |
808 | ||
809 | mlx5e_apply_netdev_addr(priv); | |
810 | } | |
811 | ||
/* Deferred worker reconciling promisc/allmulti/broadcast state and the
 * UC/MC address lists with the hardware steering tables.  When the
 * interface is being torn down (MLX5E_STATE_DESTROYING) all modes are
 * treated as disabled so every rule is removed.
 */
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	/* Edge detection: act only on state transitions. */
	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		/* Promisc also installs the any-VID VLAN rule when VLAN
		 * filtering is on; the vlan filter enable/disable paths
		 * mirror this (see mlx5e_enable/disable_vlan_filter).
		 */
		if (!priv->vlan.filter_disabled)
			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	/* Rule removals are performed after the address-list sync. */
	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}
862 | ||
86d722ad MG |
863 | static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft) |
864 | { | |
865 | int i; | |
866 | ||
867 | for (i = ft->num_groups - 1; i >= 0; i--) { | |
868 | if (!IS_ERR_OR_NULL(ft->g[i])) | |
869 | mlx5_destroy_flow_group(ft->g[i]); | |
870 | ft->g[i] = NULL; | |
871 | } | |
872 | ft->num_groups = 0; | |
873 | } | |
874 | ||
afb736e9 AV |
875 | void mlx5e_init_eth_addr(struct mlx5e_priv *priv) |
876 | { | |
877 | ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); | |
878 | } | |
879 | ||
86d722ad MG |
/* Main flow table layout: nine flow groups, ordered from most to least
 * specific match criteria.  Groups 0-2 match by ethertype/ip_protocol
 * only, groups 3-5 add a full DMAC match (hence the large sizes for the
 * per-address rules), and groups 6-8 match the multicast DMAC prefix.
 * The table size is the sum of all group sizes.
 */
#define MLX5E_MAIN_GROUP0_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE)
898 | ||
/* Create the nine flow groups of the main table, in order of decreasing
 * match specificity.  @in is a scratch create_flow_group_in buffer of
 * @inlen bytes; it is memset before each group, so the match criteria
 * (including the dmac bytes, which point INTO @in) must be re-applied for
 * every group.  Group index ranges are laid out back-to-back via @ix.
 * On failure the already-created groups are destroyed and the error from
 * the failing mlx5_create_flow_group() call is returned.
 */
static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	/* dmac points at the DMAC match-criteria bytes inside @in. */
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
				match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	/* Group 0: match ethertype + ip_protocol (no DMAC). */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 1: match ethertype only. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 2: no criteria — catch-all. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 3: full DMAC + ethertype + ip_protocol. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	eth_broadcast_addr(dmac);	/* all-ones mask = exact DMAC match */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 4: full DMAC + ethertype. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 5: full DMAC only. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 6: multicast DMAC prefix + ethertype + ip_protocol. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;	/* mask only the multicast bit of the DMAC */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 7: multicast DMAC prefix + ethertype. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 8: multicast DMAC prefix only. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	/* The failing slot holds an ERR_PTR; extract it, clear the slot,
	 * then tear down the groups created so far.
	 */
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
afb736e9 | 1021 | |
86d722ad MG |
1022 | static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft) |
1023 | { | |
1024 | u32 *in; | |
1025 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); | |
1026 | int err; | |
1027 | ||
1028 | in = mlx5_vzalloc(inlen); | |
1029 | if (!in) | |
5e24851e | 1030 | return -ENOMEM; |
afb736e9 | 1031 | |
86d722ad | 1032 | err = __mlx5e_create_main_groups(ft, in, inlen); |
afb736e9 | 1033 | |
86d722ad MG |
1034 | kvfree(in); |
1035 | return err; | |
1036 | } | |
afb736e9 | 1037 | |
86d722ad MG |
1038 | static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) |
1039 | { | |
1040 | struct mlx5e_flow_table *ft = &priv->fts.main; | |
1041 | int err; | |
1042 | ||
1043 | ft->num_groups = 0; | |
b6172aac | 1044 | ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE); |
86d722ad MG |
1045 | |
1046 | if (IS_ERR(ft->t)) { | |
1047 | err = PTR_ERR(ft->t); | |
1048 | ft->t = NULL; | |
1049 | return err; | |
1050 | } | |
1051 | ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL); | |
1052 | if (!ft->g) { | |
1053 | err = -ENOMEM; | |
1054 | goto err_destroy_main_flow_table; | |
1055 | } | |
1056 | ||
1057 | err = mlx5e_create_main_groups(ft); | |
1058 | if (err) | |
1059 | goto err_free_g; | |
1060 | return 0; | |
1061 | ||
1062 | err_free_g: | |
1063 | kfree(ft->g); | |
1064 | ||
1065 | err_destroy_main_flow_table: | |
1066 | mlx5_destroy_flow_table(ft->t); | |
1067 | ft->t = NULL; | |
1068 | ||
1069 | return err; | |
1070 | } | |
1071 | ||
1072 | static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) | |
1073 | { | |
1074 | mlx5e_destroy_groups(ft); | |
1075 | kfree(ft->g); | |
1076 | mlx5_destroy_flow_table(ft->t); | |
1077 | ft->t = NULL; | |
afb736e9 AV |
1078 | } |
1079 | ||
1080 | static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) | |
1081 | { | |
86d722ad | 1082 | mlx5e_destroy_flow_table(&priv->fts.main); |
afb736e9 AV |
1083 | } |
1084 | ||
86d722ad MG |
/* VLAN flow table layout: group 0 matches vlan_tag + first_vid (one rule
 * per VID, 4096 entries), group 1 matches vlan_tag only (any-VID /
 * untagged handling).
 */
#define MLX5E_NUM_VLAN_GROUPS	2
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE)
1090 | ||
/* Create the two flow groups of the VLAN table.  @in is a scratch
 * create_flow_group_in buffer of @inlen bytes, memset before each group,
 * so criteria are re-applied per group.  On failure the already-created
 * groups are destroyed and the error from the failing
 * mlx5_create_flow_group() call is returned.
 */
static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Group 0: match vlan_tag + first_vid — one rule per VID. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 1: match vlan_tag only — any-VID rules. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	/* The failing slot holds an ERR_PTR; extract it, clear the slot,
	 * then tear down the groups created so far.
	 */
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
1130 | ||
1131 | static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft) | |
afb736e9 | 1132 | { |
86d722ad MG |
1133 | u32 *in; |
1134 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); | |
1135 | int err; | |
afb736e9 | 1136 | |
86d722ad MG |
1137 | in = mlx5_vzalloc(inlen); |
1138 | if (!in) | |
afb736e9 AV |
1139 | return -ENOMEM; |
1140 | ||
86d722ad MG |
1141 | err = __mlx5e_create_vlan_groups(ft, in, inlen); |
1142 | ||
1143 | kvfree(in); | |
1144 | return err; | |
1145 | } | |
1146 | ||
1147 | static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) | |
1148 | { | |
1149 | struct mlx5e_flow_table *ft = &priv->fts.vlan; | |
1150 | int err; | |
1151 | ||
1152 | ft->num_groups = 0; | |
b6172aac | 1153 | ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE); |
86d722ad MG |
1154 | |
1155 | if (IS_ERR(ft->t)) { | |
1156 | err = PTR_ERR(ft->t); | |
1157 | ft->t = NULL; | |
1158 | return err; | |
1159 | } | |
1160 | ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL); | |
1161 | if (!ft->g) { | |
1162 | err = -ENOMEM; | |
1163 | goto err_destroy_vlan_flow_table; | |
1164 | } | |
1165 | ||
1166 | err = mlx5e_create_vlan_groups(ft); | |
1167 | if (err) | |
1168 | goto err_free_g; | |
1169 | ||
1170 | return 0; | |
1171 | ||
1172 | err_free_g: | |
1173 | kfree(ft->g); | |
1174 | ||
1175 | err_destroy_vlan_flow_table: | |
1176 | mlx5_destroy_flow_table(ft->t); | |
1177 | ft->t = NULL; | |
1178 | ||
1179 | return err; | |
afb736e9 AV |
1180 | } |
1181 | ||
1182 | static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) | |
1183 | { | |
86d722ad | 1184 | mlx5e_destroy_flow_table(&priv->fts.vlan); |
afb736e9 AV |
1185 | } |
1186 | ||
40ab6a6e | 1187 | int mlx5e_create_flow_tables(struct mlx5e_priv *priv) |
afb736e9 AV |
1188 | { |
1189 | int err; | |
1190 | ||
86d722ad MG |
1191 | priv->fts.ns = mlx5_get_flow_namespace(priv->mdev, |
1192 | MLX5_FLOW_NAMESPACE_KERNEL); | |
1193 | ||
1194 | if (!priv->fts.ns) | |
1195 | return -EINVAL; | |
1196 | ||
1197 | err = mlx5e_create_vlan_flow_table(priv); | |
afb736e9 AV |
1198 | if (err) |
1199 | return err; | |
1200 | ||
86d722ad | 1201 | err = mlx5e_create_main_flow_table(priv); |
afb736e9 | 1202 | if (err) |
86d722ad | 1203 | goto err_destroy_vlan_flow_table; |
afb736e9 | 1204 | |
9b37b07f AS |
1205 | err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); |
1206 | if (err) | |
86d722ad | 1207 | goto err_destroy_main_flow_table; |
9b37b07f | 1208 | |
afb736e9 AV |
1209 | return 0; |
1210 | ||
1211 | err_destroy_main_flow_table: | |
1212 | mlx5e_destroy_main_flow_table(priv); | |
86d722ad MG |
1213 | err_destroy_vlan_flow_table: |
1214 | mlx5e_destroy_vlan_flow_table(priv); | |
afb736e9 AV |
1215 | |
1216 | return err; | |
1217 | } | |
1218 | ||
40ab6a6e | 1219 | void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv) |
afb736e9 | 1220 | { |
9b37b07f | 1221 | mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); |
afb736e9 | 1222 | mlx5e_destroy_main_flow_table(priv); |
86d722ad | 1223 | mlx5e_destroy_vlan_flow_table(priv); |
afb736e9 | 1224 | } |