net/mlx5_core: Flow steering tree initialization
[deliverable/linux.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_flow_table.c
CommitLineData
afb736e9
AV
1/*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/list.h>
34#include <linux/ip.h>
35#include <linux/ipv6.h>
36#include <linux/tcp.h>
37#include <linux/mlx5/flow_table.h>
38#include "en.h"
39
/* Match scope of an L2 steering rule. */
enum {
	MLX5E_FULLMATCH = 0, /* match one specific destination MAC */
	MLX5E_ALLMULTI = 1,  /* match all multicast destinations */
	MLX5E_PROMISC = 2,   /* match any destination (promiscuous) */
};

/* Ethernet address class, as returned by mlx5e_get_eth_addr_type(). */
enum {
	MLX5E_UC = 0,       /* unicast */
	MLX5E_MC_IPV4 = 1,  /* IPv4 multicast (01:00:5e, high group bit clear) */
	MLX5E_MC_IPV6 = 2,  /* IPv6 multicast (33:33:xx:xx:xx:xx) */
	MLX5E_MC_OTHER = 3, /* any other multicast */
};

/* Pending action for an address tracked in the netdev UC/MC hash tables. */
enum {
	MLX5E_ACTION_NONE = 0, /* in sync, nothing to do */
	MLX5E_ACTION_ADD = 1,  /* steering rules must be added */
	MLX5E_ACTION_DEL = 2,  /* steering rules must be removed */
};
58
/* Hash-bucket node tracking one netdev MAC address and its steering state. */
struct mlx5e_eth_addr_hash_node {
	struct hlist_node hlist;
	u8 action;			/* MLX5E_ACTION_* pending for this address */
	struct mlx5e_eth_addr_info ai;	/* MAC + per-traffic-type flow entry indices */
};
64
65static inline int mlx5e_hash_eth_addr(u8 *addr)
66{
67 return addr[5];
68}
69
70static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
71{
72 struct mlx5e_eth_addr_hash_node *hn;
73 int ix = mlx5e_hash_eth_addr(addr);
74 int found = 0;
75
76 hlist_for_each_entry(hn, &hash[ix], hlist)
77 if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
78 found = 1;
79 break;
80 }
81
82 if (found) {
83 hn->action = MLX5E_ACTION_NONE;
84 return;
85 }
86
87 hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
88 if (!hn)
89 return;
90
91 ether_addr_copy(hn->ai.addr, addr);
92 hn->action = MLX5E_ACTION_ADD;
93
94 hlist_add_head(&hn->hlist, &hash[ix]);
95}
96
/* Unlink a tracked address node from its hash bucket and free it. */
static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
102
103static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
104 struct mlx5e_eth_addr_info *ai)
105{
106 void *ft = priv->ft.main;
107
a741749f
AS
108 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
109 mlx5_del_flow_table_entry(ft,
110 ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
111
112 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
113 mlx5_del_flow_table_entry(ft,
114 ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
115
116 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
117 mlx5_del_flow_table_entry(ft,
118 ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
119
120 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
121 mlx5_del_flow_table_entry(ft,
122 ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
123
5a6f8aef 124 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
afb736e9
AV
125 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
126
5a6f8aef 127 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
afb736e9
AV
128 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
129
5a6f8aef 130 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
afb736e9
AV
131 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
132
5a6f8aef 133 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
afb736e9
AV
134 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
135
5a6f8aef 136 if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
afb736e9
AV
137 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
138
5a6f8aef 139 if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
afb736e9
AV
140 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
141
5a6f8aef 142 if (ai->tt_vec & BIT(MLX5E_TT_ANY))
afb736e9
AV
143 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
144}
145
146static int mlx5e_get_eth_addr_type(u8 *addr)
147{
148 if (is_unicast_ether_addr(addr))
149 return MLX5E_UC;
150
151 if ((addr[0] == 0x01) &&
152 (addr[1] == 0x00) &&
153 (addr[2] == 0x5e) &&
154 !(addr[3] & 0x80))
155 return MLX5E_MC_IPV4;
156
157 if ((addr[0] == 0x33) &&
158 (addr[1] == 0x33))
159 return MLX5E_MC_IPV6;
160
161 return MLX5E_MC_OTHER;
162}
163
164static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
165{
166 int eth_addr_type;
167 u32 ret;
168
169 switch (type) {
170 case MLX5E_FULLMATCH:
171 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
172 switch (eth_addr_type) {
173 case MLX5E_UC:
174 ret =
5a6f8aef
AS
175 BIT(MLX5E_TT_IPV4_TCP) |
176 BIT(MLX5E_TT_IPV6_TCP) |
177 BIT(MLX5E_TT_IPV4_UDP) |
178 BIT(MLX5E_TT_IPV6_UDP) |
a741749f
AS
179 BIT(MLX5E_TT_IPV4_IPSEC_AH) |
180 BIT(MLX5E_TT_IPV6_IPSEC_AH) |
181 BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
182 BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
5a6f8aef
AS
183 BIT(MLX5E_TT_IPV4) |
184 BIT(MLX5E_TT_IPV6) |
185 BIT(MLX5E_TT_ANY) |
afb736e9
AV
186 0;
187 break;
188
189 case MLX5E_MC_IPV4:
190 ret =
5a6f8aef
AS
191 BIT(MLX5E_TT_IPV4_UDP) |
192 BIT(MLX5E_TT_IPV4) |
afb736e9
AV
193 0;
194 break;
195
196 case MLX5E_MC_IPV6:
197 ret =
5a6f8aef
AS
198 BIT(MLX5E_TT_IPV6_UDP) |
199 BIT(MLX5E_TT_IPV6) |
afb736e9
AV
200 0;
201 break;
202
203 case MLX5E_MC_OTHER:
204 ret =
5a6f8aef 205 BIT(MLX5E_TT_ANY) |
afb736e9
AV
206 0;
207 break;
208 }
209
210 break;
211
212 case MLX5E_ALLMULTI:
213 ret =
5a6f8aef
AS
214 BIT(MLX5E_TT_IPV4_UDP) |
215 BIT(MLX5E_TT_IPV6_UDP) |
216 BIT(MLX5E_TT_IPV4) |
217 BIT(MLX5E_TT_IPV6) |
218 BIT(MLX5E_TT_ANY) |
afb736e9
AV
219 0;
220 break;
221
222 default: /* MLX5E_PROMISC */
223 ret =
5a6f8aef
AS
224 BIT(MLX5E_TT_IPV4_TCP) |
225 BIT(MLX5E_TT_IPV6_TCP) |
226 BIT(MLX5E_TT_IPV4_UDP) |
227 BIT(MLX5E_TT_IPV6_UDP) |
a741749f
AS
228 BIT(MLX5E_TT_IPV4_IPSEC_AH) |
229 BIT(MLX5E_TT_IPV6_IPSEC_AH) |
230 BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
231 BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
5a6f8aef
AS
232 BIT(MLX5E_TT_IPV4) |
233 BIT(MLX5E_TT_IPV6) |
234 BIT(MLX5E_TT_ANY) |
afb736e9
AV
235 0;
236 break;
237 }
238
239 return ret;
240}
241
/* Install all flow-table entries required for address @ai under match
 * scope @type (MLX5E_FULLMATCH/ALLMULTI/PROMISC), recording each created
 * entry index in ai->ft_ix[] and the installed set in ai->tt_vec.
 *
 * @flow_context / @match_criteria are caller-provided zeroed scratch
 * buffers; the MLX5_SET() calls below deliberately accumulate state in
 * them across the per-traffic-type sections (e.g. ip_protocol is set once
 * for both UDP entries) — the order of the sections is load-bearing.
 *
 * Returns 0 on success; on failure removes every entry installed so far
 * and returns the error.
 */
static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
				     struct mlx5e_eth_addr_info *ai, int type,
				     void *flow_context, void *match_criteria)
{
	u8 match_criteria_enable = 0;
	void *match_value;
	void *dest;
	u8 *dmac;
	u8 *match_criteria_dmac;
	void *ft = priv->ft.main;
	u32 *tirn = priv->tirn;
	u32 *ft_ix;
	u32 tt_vec;
	int err;

	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dmac = MLX5_ADDR_OF(fte_match_param, match_value,
			    outer_headers.dmac_47_16);
	match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
					   outer_headers.dmac_47_16);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	/* Every entry forwards to a single TIR; only destination_id varies. */
	MLX5_SET(flow_context, flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);

	/* Set up the DMAC match according to the rule scope. */
	switch (type) {
	case MLX5E_FULLMATCH:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(match_criteria_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		/* Match only the multicast bit of the DMAC. */
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		match_criteria_dmac[0] = 0x01;
		dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		/* No DMAC match at all. */
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	/* MLX5E_TT_ANY: no ethertype/protocol match (criteria so far only). */
	ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_ANY]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	/* All remaining entries also match on ethertype. */
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.ethertype);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	/* From here on, also match on the IP protocol (UDP first). */
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_UDP);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_TCP);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_AH);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_IPSEC_AH]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_IPSEC_AH]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_ESP);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return 0;

err_del_ai:
	/* Roll back everything recorded in ai->tt_vec so far. */
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return err;
}
477
478static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
479 struct mlx5e_eth_addr_info *ai, int type)
480{
481 u32 *flow_context;
482 u32 *match_criteria;
483 int err;
484
485 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
486 MLX5_ST_SZ_BYTES(dest_format_struct));
487 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
488 if (!flow_context || !match_criteria) {
489 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
490 err = -ENOMEM;
491 goto add_eth_addr_rule_out;
492 }
493
494 err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
495 match_criteria);
496 if (err)
497 netdev_err(priv->netdev, "%s: failed\n", __func__);
498
499add_eth_addr_rule_out:
500 kvfree(match_criteria);
501 kvfree(flow_context);
502 return err;
503}
504
aad9e6e4
SM
505static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
506{
507 struct net_device *ndev = priv->netdev;
508 int max_list_size;
509 int list_size;
510 u16 *vlans;
511 int vlan;
512 int err;
513 int i;
514
515 list_size = 0;
516 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
517 list_size++;
518
519 max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
520
521 if (list_size > max_list_size) {
522 netdev_warn(ndev,
523 "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
524 list_size, max_list_size);
525 list_size = max_list_size;
526 }
527
528 vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
529 if (!vlans)
530 return -ENOMEM;
531
532 i = 0;
533 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
534 if (i >= list_size)
535 break;
536 vlans[i++] = vlan;
537 }
538
539 err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
540 if (err)
541 netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
542 err);
543
544 kfree(vlans);
545 return err;
546}
547
/* Kinds of rules in the VLAN flow table. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,	/* untagged traffic */
	MLX5E_VLAN_RULE_TYPE_ANY_VID,	/* any VLAN id (filtering disabled) */
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,	/* one specific VLAN id */
};
553
/* Install one rule of @rule_type (for MATCH_VID rules, matching @vid) in
 * the VLAN flow table; matching traffic is forwarded to the main flow
 * table. For MATCH_VID rules the vport context VLAN list is refreshed
 * first. Returns 0 on success or a negative errno.
 */
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u8 match_criteria_enable = 0;
	u32 *flow_context;
	void *match_value;
	void *dest;
	u32 *match_criteria;
	u32 *ft_ix;
	int err;

	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
				    MLX5_ST_SZ_BYTES(dest_format_struct));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!flow_context || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}
	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	/* All VLAN rules forward to the main flow table. */
	MLX5_SET(flow_context, flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
	MLX5_SET(dest_format_struct, dest, destination_id,
		 mlx5_get_flow_table_id(priv->ft.main));

	/* Every rule matches on the vlan_tag presence bit. */
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.vlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* vlan_tag value stays 0 => untagged. */
		ft_ix = &priv->vlan.untagged_rule_ft_ix;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
			 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		/* Keep the vport context VLAN list in sync first. */
		err = mlx5e_vport_context_update_vlans(priv);
		if (err)
			goto add_vlan_rule_out;

		ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
			 1);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
					match_criteria, flow_context, ft_ix);
	if (err)
		netdev_err(priv->netdev, "%s: failed\n", __func__);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(flow_context);
	return err;
}
622
/* Remove the VLAN-table rule of @rule_type (for MATCH_VID, the rule for
 * @vid); MATCH_VID removals also refresh the vport context VLAN list.
 */
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		mlx5_del_flow_table_entry(priv->ft.vlan,
					  priv->vlan.untagged_rule_ft_ix);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		mlx5_del_flow_table_entry(priv->ft.vlan,
					  priv->vlan.any_vlan_rule_ft_ix);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5_del_flow_table_entry(priv->ft.vlan,
					  priv->vlan.active_vlans_ft_ix[vid]);
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}
642
/* Re-enable VLAN filtering: drop the any-VID catch-all rule installed
 * while filtering was off. In promiscuous mode the catch-all rule is kept
 * (it is managed by the promisc enable/disable path in
 * mlx5e_set_rx_mode_work()).
 */
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled)
		return;

	priv->vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
653
/* Disable VLAN filtering: accept any VLAN id via a catch-all rule. If the
 * device is promiscuous the catch-all rule already exists, so skip it.
 */
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled)
		return;

	priv->vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
664
/* ndo_vlan_rx_add_vid: track @vid and install its steering rule. */
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
674
/* ndo_vlan_rx_kill_vid: untrack @vid and remove its steering rule. */
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}
686
/* Iterate (deletion-safe) over every node in all buckets of an address
 * hash table of MLX5E_ETH_ADDR_HASH_SIZE buckets.
 */
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
690
691static void mlx5e_execute_action(struct mlx5e_priv *priv,
692 struct mlx5e_eth_addr_hash_node *hn)
693{
694 switch (hn->action) {
695 case MLX5E_ACTION_ADD:
696 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
697 hn->action = MLX5E_ACTION_NONE;
698 break;
699
700 case MLX5E_ACTION_DEL:
701 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
702 mlx5e_del_eth_addr_from_hash(hn);
703 break;
704 }
705}
706
/* Snapshot the netdev's UC/MC address lists (plus its own dev_addr) into
 * the driver hash tables, under the netdev address lock.
 */
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	/* The device's own MAC is always part of the UC set. */
	mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
				   priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}
725
/* Flatten the tracked UC or MC addresses into @addr_array (at most @size
 * entries) for the vport context command. For UC the netdev's own address
 * is placed first; for MC the broadcast address is placed first when
 * broadcast is enabled.
 */
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		/* dev_addr already occupies slot 0; skip the duplicate. */
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		/* Never overrun the caller-sized array. */
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}
752
/* Push the tracked UC or MC address list into the NIC vport context,
 * truncating (with a warning) to the device's maximum list size.
 */
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	/* MC lists reserve one extra slot for the broadcast address. */
	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	/* size == 0 sends an empty list (addr_array stays NULL). */
	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}
799
/* Sync the full NIC vport context: UC list, MC list, and the
 * allmulti/promisc flags.
 */
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}
810
/* Execute every pending add/delete action in both address hash tables. */
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		mlx5e_execute_action(priv, hn);
}
823
/* Reconcile the driver's tracked addresses with the netdev's lists:
 * mark everything for deletion, re-mark what is still present (skipped
 * while the interface is being destroyed, so everything gets deleted),
 * then apply the resulting add/delete actions.
 */
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}
840
/* Deferred rx-mode handler: compute the desired promisc/allmulti/broadcast
 * state from the netdev flags (all forced off while destroying), apply the
 * enable transitions before syncing addresses and the disable transitions
 * after, then push the result into the vport context.
 */
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	/* Edge-detect each feature against the currently applied state. */
	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		/* Promisc also bypasses VLAN filtering; add the any-VID rule
		 * unless it is already installed by the filter-disabled path.
		 */
		if (!priv->vlan.filter_disabled)
			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}
891
/* Seed the address database with the netdev's broadcast address. */
void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
}
896
/* Create the main (level 1) flow table with nine match groups, ordered
 * from most to least specific: three promisc/allmulti-style groups with
 * no DMAC match, three full-DMAC groups, and three multicast-bit groups.
 * Within each triple the groups match on {ethertype + ip_protocol},
 * {ethertype}, and {nothing/DMAC only} respectively.
 */
static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;
	u8 *dmac;

	g = kcalloc(9, sizeof(*g), GFP_KERNEL);
	if (!g)
		return -ENOMEM;

	/* g[0..2]: no DMAC match (promisc-type rules). */
	g[0].log_sz = 3;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
			 outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
			 outer_headers.ip_protocol);

	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
			 outer_headers.ethertype);

	g[2].log_sz = 0; /* catch-all, no criteria */

	/* g[3..5]: full DMAC match (per-address rules). */
	g[3].log_sz = 14;
	g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
			    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
			 outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
			 outer_headers.ip_protocol);

	g[4].log_sz = 13;
	g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
			    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
			 outer_headers.ethertype);

	g[5].log_sz = 11;
	g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
			    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);

	/* g[6..8]: match only the multicast bit of the DMAC (allmulti). */
	g[6].log_sz = 2;
	g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
			 outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
			 outer_headers.ip_protocol);

	g[7].log_sz = 1;
	g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
			 outer_headers.ethertype);

	g[8].log_sz = 0;
	g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
					       MLX5_FLOW_TABLE_TYPE_NIC_RCV,
					       9, g);
	kfree(g);

	return priv->ft.main ? 0 : -ENOMEM;
}
974
/* Tear down the main flow table created by mlx5e_create_main_flow_table(). */
static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.main);
}
979
/* Create the VLAN (level 0) flow table with two groups: per-VID rules
 * (vlan_tag + first_vid) and the untagged/any-VID pair (vlan_tag only).
 */
static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;

	g = kcalloc(2, sizeof(*g), GFP_KERNEL);
	if (!g)
		return -ENOMEM;

	g[0].log_sz = 12;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
			 outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
			 outer_headers.first_vid);

	/* untagged + any vlan id */
	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
			 outer_headers.vlan_tag);

	priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
					       MLX5_FLOW_TABLE_TYPE_NIC_RCV,
					       2, g);

	kfree(g);
	return priv->ft.vlan ? 0 : -ENOMEM;
}
1008
/* Tear down the VLAN flow table created by mlx5e_create_vlan_flow_table(). */
static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.vlan);
}
1013
/* Create the full steering tree (main table, VLAN table, and the default
 * untagged rule), unwinding on any failure. Returns 0 or a negative errno.
 */
int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		return err;

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		goto err_destroy_main_flow_table;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto err_destroy_vlan_flow_table;

	return 0;

err_destroy_vlan_flow_table:
	mlx5e_destroy_vlan_flow_table(priv);

err_destroy_main_flow_table:
	mlx5e_destroy_main_flow_table(priv);

	return err;
}
1040
/* Tear down the steering tree in reverse creation order. */
void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	mlx5e_destroy_vlan_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
}
This page took 0.118145 seconds and 5 git commands to generate.