6feebda4b3e47d95dadc8e2d588db2259bcd2f7b
[deliverable/linux.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_flow_table.c
1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/list.h>
34 #include <linux/ip.h>
35 #include <linux/ipv6.h>
36 #include <linux/tcp.h>
37 #include <linux/mlx5/flow_table.h>
38 #include "en.h"
39
/* Match level used when programming a MAC address into the flow table:
 * FULLMATCH matches one exact DMAC, ALLMULTI matches any DMAC with the
 * multicast bit set, PROMISC matches every frame.
 */
enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

/* Coarse Ethernet-address classification, see mlx5e_get_eth_addr_type() */
enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

/* Pending sync action for a hash node, consumed by mlx5e_execute_action() */
enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

/* One tracked netdev UC/MC address plus the action needed to bring the
 * hardware rules in sync with the netdev address lists.
 */
struct mlx5e_eth_addr_hash_node {
	struct hlist_node hlist;
	u8 action;	/* MLX5E_ACTION_* */
	struct mlx5e_eth_addr_info ai;
};
64
/* Bucket index for the address hash tables: the last octet of the MAC
 * is the most variable one, so it is used directly as the index.
 */
static inline int mlx5e_hash_eth_addr(u8 *addr)
{
	return addr[5];
}
69
70 static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
71 {
72 struct mlx5e_eth_addr_hash_node *hn;
73 int ix = mlx5e_hash_eth_addr(addr);
74 int found = 0;
75
76 hlist_for_each_entry(hn, &hash[ix], hlist)
77 if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
78 found = 1;
79 break;
80 }
81
82 if (found) {
83 hn->action = MLX5E_ACTION_NONE;
84 return;
85 }
86
87 hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
88 if (!hn)
89 return;
90
91 ether_addr_copy(hn->ai.addr, addr);
92 hn->action = MLX5E_ACTION_ADD;
93
94 hlist_add_head(&hn->hlist, &hash[ix]);
95 }
96
/* Unlink @hn from its hash bucket and free it */
static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
102
103 static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
104 struct mlx5e_eth_addr_info *ai)
105 {
106 void *ft = priv->ft.main;
107
108 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
109 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
110
111 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
112 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
113
114 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
115 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
116
117 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
118 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
119
120 if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
121 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
122
123 if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
124 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
125
126 if (ai->tt_vec & (1 << MLX5E_TT_ANY))
127 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
128 }
129
130 static int mlx5e_get_eth_addr_type(u8 *addr)
131 {
132 if (is_unicast_ether_addr(addr))
133 return MLX5E_UC;
134
135 if ((addr[0] == 0x01) &&
136 (addr[1] == 0x00) &&
137 (addr[2] == 0x5e) &&
138 !(addr[3] & 0x80))
139 return MLX5E_MC_IPV4;
140
141 if ((addr[0] == 0x33) &&
142 (addr[1] == 0x33))
143 return MLX5E_MC_IPV6;
144
145 return MLX5E_MC_OTHER;
146 }
147
148 static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
149 {
150 int eth_addr_type;
151 u32 ret;
152
153 switch (type) {
154 case MLX5E_FULLMATCH:
155 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
156 switch (eth_addr_type) {
157 case MLX5E_UC:
158 ret =
159 (1 << MLX5E_TT_IPV4_TCP) |
160 (1 << MLX5E_TT_IPV6_TCP) |
161 (1 << MLX5E_TT_IPV4_UDP) |
162 (1 << MLX5E_TT_IPV6_UDP) |
163 (1 << MLX5E_TT_IPV4) |
164 (1 << MLX5E_TT_IPV6) |
165 (1 << MLX5E_TT_ANY) |
166 0;
167 break;
168
169 case MLX5E_MC_IPV4:
170 ret =
171 (1 << MLX5E_TT_IPV4_UDP) |
172 (1 << MLX5E_TT_IPV4) |
173 0;
174 break;
175
176 case MLX5E_MC_IPV6:
177 ret =
178 (1 << MLX5E_TT_IPV6_UDP) |
179 (1 << MLX5E_TT_IPV6) |
180 0;
181 break;
182
183 case MLX5E_MC_OTHER:
184 ret =
185 (1 << MLX5E_TT_ANY) |
186 0;
187 break;
188 }
189
190 break;
191
192 case MLX5E_ALLMULTI:
193 ret =
194 (1 << MLX5E_TT_IPV4_UDP) |
195 (1 << MLX5E_TT_IPV6_UDP) |
196 (1 << MLX5E_TT_IPV4) |
197 (1 << MLX5E_TT_IPV6) |
198 (1 << MLX5E_TT_ANY) |
199 0;
200 break;
201
202 default: /* MLX5E_PROMISC */
203 ret =
204 (1 << MLX5E_TT_IPV4_TCP) |
205 (1 << MLX5E_TT_IPV6_TCP) |
206 (1 << MLX5E_TT_IPV4_UDP) |
207 (1 << MLX5E_TT_IPV6_UDP) |
208 (1 << MLX5E_TT_IPV4) |
209 (1 << MLX5E_TT_IPV6) |
210 (1 << MLX5E_TT_ANY) |
211 0;
212 break;
213 }
214
215 return ret;
216 }
217
/* Program the main flow table with one entry per traffic type reported
 * by mlx5e_get_tt_vec(ai, type), each forwarding to its TIR.
 *
 * @flow_context and @match_criteria are caller-allocated, zeroed
 * firmware command buffers. They are reused across entries and
 * progressively tightened: first the TT_ANY entry (DMAC criteria only),
 * then ethertype entries (IPV4/IPV6), then ethertype + ip_protocol
 * entries (UDP first, then TCP). The statement order below is therefore
 * significant — do not reorder.
 *
 * On any failure, all entries installed so far are removed and the
 * error is returned. ai->tt_vec tracks the installed entries so the
 * unwind (and later deletion) knows what exists.
 */
static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
				     struct mlx5e_eth_addr_info *ai, int type,
				     void *flow_context, void *match_criteria)
{
	u8 match_criteria_enable = 0;
	void *match_value;
	void *dest;
	u8 *dmac;
	u8 *match_criteria_dmac;
	void *ft = priv->ft.main;
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err;

	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dmac = MLX5_ADDR_OF(fte_match_param, match_value,
			    outer_headers.dmac_47_16);
	match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
					   outer_headers.dmac_47_16);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	/* Every entry forwards to exactly one destination TIR */
	MLX5_SET(flow_context, flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);

	switch (type) {
	case MLX5E_FULLMATCH:
		/* Exact DMAC match */
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(match_criteria_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		/* Match only the multicast bit of the DMAC */
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		match_criteria_dmac[0] = 0x01;
		dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		/* No match criteria at all: catch everything */
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & (1 << MLX5E_TT_ANY)) {
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_ANY]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_ANY]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_ANY);
	}

	/* From here on, all entries additionally match on ethertype */
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.ethertype);

	if (tt_vec & (1 << MLX5E_TT_IPV4)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_IPV4]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4);
	}

	if (tt_vec & (1 << MLX5E_TT_IPV6)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_IPV6]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6);
	}

	/* From here on, also match on ip_protocol (UDP entries first) */
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_UDP);

	if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_IPV4_UDP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_IPV6_UDP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
	}

	/* Switch the ip_protocol match value to TCP for the last two */
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_TCP);

	if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_IPV4_TCP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						&ai->ft_ix[MLX5E_TT_IPV6_TCP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return err;
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
	}

	return 0;
}
381
382 static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
383 struct mlx5e_eth_addr_info *ai, int type)
384 {
385 u32 *flow_context;
386 u32 *match_criteria;
387 int err;
388
389 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
390 MLX5_ST_SZ_BYTES(dest_format_struct));
391 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
392 if (!flow_context || !match_criteria) {
393 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
394 err = -ENOMEM;
395 goto add_eth_addr_rule_out;
396 }
397
398 err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
399 match_criteria);
400 if (err)
401 netdev_err(priv->netdev, "%s: failed\n", __func__);
402
403 add_eth_addr_rule_out:
404 kvfree(match_criteria);
405 kvfree(flow_context);
406 return err;
407 }
408
/* Kinds of rules in the VLAN flow table: untagged frames, any tagged
 * frame (used while VLAN filtering is disabled), or one specific VID.
 */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};
414
/* Install one rule in the VLAN flow table matching untagged frames,
 * any tagged frame, or one specific @vid, forwarding matches to the
 * main flow table. The resulting entry index is stored in priv->vlan so
 * mlx5e_del_vlan_rule() can remove it later.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u8 match_criteria_enable = 0;
	u32 *flow_context;
	void *match_value;
	void *dest;
	u32 *match_criteria;
	u32 *ft_ix;
	int err;

	/* flow context carries one trailing destination entry */
	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
				    MLX5_ST_SZ_BYTES(dest_format_struct));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!flow_context || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}
	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	/* All VLAN rules forward to the main flow table */
	MLX5_SET(flow_context, flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
	MLX5_SET(dest_format_struct, dest, destination_id,
		 mlx5_get_flow_table_id(priv->ft.main));

	/* Every rule type matches on the vlan_tag presence bit */
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.vlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* match value stays zero: vlan_tag must be clear */
		ft_ix = &priv->vlan.untagged_rule_ft_ix;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
			 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
			 1);
		/* additionally match first_vid == vid */
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
					match_criteria, flow_context, ft_ix);
	if (err)
		netdev_err(priv->netdev, "%s: failed\n", __func__);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(flow_context);
	return err;
}
479
480 static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
481 enum mlx5e_vlan_rule_type rule_type, u16 vid)
482 {
483 switch (rule_type) {
484 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
485 mlx5_del_flow_table_entry(priv->ft.vlan,
486 priv->vlan.untagged_rule_ft_ix);
487 break;
488 case MLX5E_VLAN_RULE_TYPE_ANY_VID:
489 mlx5_del_flow_table_entry(priv->ft.vlan,
490 priv->vlan.any_vlan_rule_ft_ix);
491 break;
492 case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
493 mlx5_del_flow_table_entry(priv->ft.vlan,
494 priv->vlan.active_vlans_ft_ix[vid]);
495 break;
496 }
497 }
498
499 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
500 {
501 WARN_ON(!mutex_is_locked(&priv->state_lock));
502
503 if (priv->vlan.filter_disabled) {
504 priv->vlan.filter_disabled = false;
505 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
506 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
507 0);
508 }
509 }
510
511 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
512 {
513 WARN_ON(!mutex_is_locked(&priv->state_lock));
514
515 if (!priv->vlan.filter_disabled) {
516 priv->vlan.filter_disabled = true;
517 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
518 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
519 0);
520 }
521 }
522
523 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
524 u16 vid)
525 {
526 struct mlx5e_priv *priv = netdev_priv(dev);
527 int err = 0;
528
529 mutex_lock(&priv->state_lock);
530
531 set_bit(vid, priv->vlan.active_vlans);
532 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
533 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
534 vid);
535
536 mutex_unlock(&priv->state_lock);
537
538 return err;
539 }
540
541 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
542 u16 vid)
543 {
544 struct mlx5e_priv *priv = netdev_priv(dev);
545
546 mutex_lock(&priv->state_lock);
547
548 clear_bit(vid, priv->vlan.active_vlans);
549 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
550 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
551
552 mutex_unlock(&priv->state_lock);
553
554 return 0;
555 }
556
557 int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
558 {
559 u16 vid;
560 int err;
561
562 for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
563 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
564 vid);
565 if (err)
566 return err;
567 }
568
569 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
570 if (err)
571 return err;
572
573 if (priv->vlan.filter_disabled) {
574 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
575 0);
576 if (err)
577 return err;
578 }
579
580 return 0;
581 }
582
/* Remove all VLAN flow-table rules: the any-VID rule (present only
 * while VLAN filtering is disabled), the untagged rule, and one rule
 * per active VID. Inverse of mlx5e_add_all_vlan_rules().
 */
void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
	u16 vid;

	if (priv->vlan.filter_disabled)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
595
/* Iterate (deletion-safe) over every node in every bucket of an
 * MLX5E_ETH_ADDR_HASH_SIZE-bucket address hash table.
 */
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
599
600 static void mlx5e_execute_action(struct mlx5e_priv *priv,
601 struct mlx5e_eth_addr_hash_node *hn)
602 {
603 switch (hn->action) {
604 case MLX5E_ACTION_ADD:
605 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
606 hn->action = MLX5E_ACTION_NONE;
607 break;
608
609 case MLX5E_ACTION_DEL:
610 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
611 mlx5e_del_eth_addr_from_hash(hn);
612 break;
613 }
614 }
615
/* Snapshot the netdev's current UC/MC address lists (plus the primary
 * dev_addr) into the driver hash tables under the netdev addr-list
 * lock. Existing entries are re-marked in-sync; new ones are marked
 * for addition (see mlx5e_add_eth_addr_to_hash()).
 */
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	/* the device's own address always gets a UC rule */
	mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
				   priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}
634
/* Execute the pending ADD/DEL action of every node in both the UC and
 * the MC address hash tables.
 */
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		mlx5e_execute_action(priv, hn);
}
647
/* Mark-and-sweep resync of the per-address rules:
 * 1) mark every cached address for deletion,
 * 2) re-mark the addresses still present on the netdev (ADD for new
 *    ones, NONE for unchanged) — only while the device is open,
 * 3) apply the resulting actions.
 * When the device is closed, step 2 is skipped so all rules/nodes are
 * removed.
 */
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}
664
/* Core rx-mode resync, called with priv->state_lock held. Computes the
 * desired promisc/allmulti/broadcast state from the netdev flags (all
 * forced off while the device is closed) and applies the delta against
 * the state cached in priv->eth_addr. Broader rules are enabled before
 * the per-address sync and disabled after it — NOTE(review): this
 * enable-before/disable-after ordering presumably avoids a window where
 * traffic is dropped during the transition; preserve it.
 */
void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	/* deltas against the currently programmed state */
	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc)
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);

	/* remember the newly programmed state */
	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;
}
702
/* Deferred-work entry point for rx-mode changes: takes the state lock
 * and resyncs only while the device is open.
 */
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_set_rx_mode_core(priv);
	mutex_unlock(&priv->state_lock);
}
713
/* One-time init of the address database: cache the netdev broadcast
 * address so rx-mode sync can install its fullmatch rule.
 */
void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
}
718
719 static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
720 {
721 struct mlx5_flow_table_group *g;
722 u8 *dmac;
723
724 g = kcalloc(9, sizeof(*g), GFP_KERNEL);
725
726 g[0].log_sz = 2;
727 g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
728 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
729 outer_headers.ethertype);
730 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
731 outer_headers.ip_protocol);
732
733 g[1].log_sz = 1;
734 g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
735 MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
736 outer_headers.ethertype);
737
738 g[2].log_sz = 0;
739
740 g[3].log_sz = 14;
741 g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
742 dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
743 outer_headers.dmac_47_16);
744 memset(dmac, 0xff, ETH_ALEN);
745 MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
746 outer_headers.ethertype);
747 MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
748 outer_headers.ip_protocol);
749
750 g[4].log_sz = 13;
751 g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
752 dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
753 outer_headers.dmac_47_16);
754 memset(dmac, 0xff, ETH_ALEN);
755 MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
756 outer_headers.ethertype);
757
758 g[5].log_sz = 11;
759 g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
760 dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
761 outer_headers.dmac_47_16);
762 memset(dmac, 0xff, ETH_ALEN);
763
764 g[6].log_sz = 2;
765 g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
766 dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
767 outer_headers.dmac_47_16);
768 dmac[0] = 0x01;
769 MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
770 outer_headers.ethertype);
771 MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
772 outer_headers.ip_protocol);
773
774 g[7].log_sz = 1;
775 g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
776 dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
777 outer_headers.dmac_47_16);
778 dmac[0] = 0x01;
779 MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
780 outer_headers.ethertype);
781
782 g[8].log_sz = 0;
783 g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
784 dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
785 outer_headers.dmac_47_16);
786 dmac[0] = 0x01;
787 priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
788 MLX5_FLOW_TABLE_TYPE_NIC_RCV,
789 9, g);
790 kfree(g);
791
792 return priv->ft.main ? 0 : -ENOMEM;
793 }
794
/* Destroy the main RX flow table created by mlx5e_create_main_flow_table() */
static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.main);
}
799
/* Create the VLAN flow table (level 0, the first lookup stage) with two
 * match groups: group 0 (2^12 = 4096 entries, one per possible VID)
 * matches vlan_tag + first_vid; group 1 (2 entries) matches vlan_tag
 * only and holds the untagged and any-VID rules.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;

	g = kcalloc(2, sizeof(*g), GFP_KERNEL);
	if (!g)
		return -ENOMEM;

	g[0].log_sz = 12;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
			 outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
			 outer_headers.first_vid);

	/* untagged + any vlan id */
	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
			 outer_headers.vlan_tag);

	priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
					       MLX5_FLOW_TABLE_TYPE_NIC_RCV,
					       2, g);

	kfree(g);
	return priv->ft.vlan ? 0 : -ENOMEM;
}
828
/* Destroy the VLAN flow table created by mlx5e_create_vlan_flow_table() */
static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.vlan);
}
833
/* Create both RX flow tables. The main table is created first; the
 * VLAN table's rules forward into it (see mlx5e_add_vlan_rule()). On
 * VLAN-table failure the main table is torn down again.
 * Returns 0 or a negative errno.
 */
int mlx5e_open_flow_table(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		return err;

	err = mlx5e_create_vlan_flow_table(priv);
	if (err) {
		mlx5e_destroy_main_flow_table(priv);
		return err;
	}

	return 0;
}
853
/* Destroy both RX flow tables in reverse creation order */
void mlx5e_close_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
}
This page took 0.060961 seconds and 4 git commands to generate.