/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <net/switchdev.h>

/* Local driver headers; restored here since the register/port helpers used
 * throughout this file (mlxsw_reg_*, mlxsw_sp_port_*) are declared in them.
 */
#include "spectrum.h"
#include "core.h"
#include "reg.h"

static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	u16 fid = vid;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		fid = mlxsw_sp_vfid_to_fid(vfid);
	}

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

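/* Illustrative sketch (not part of the driver logic above, just a summary):
 * for a vPort the FID is derived from its vFID,
 *
 *	vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
 *	fid  = mlxsw_sp_vfid_to_fid(vfid);
 *
 * while a regular port uses the VID directly, falling back to the port's
 * PVID when the caller passes VID 0.
 */
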
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
{
	return vfid >= MLXSW_SP_VFID_PORT_MAX;
}

static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		if (mlxsw_sp_vfid_is_vport_br(idx_begin))
			local_port = mlxsw_sp_port->local_port;
		else
			local_port = MLXSW_PORT_CPU_PORT;
	} else {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
	}

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
	kfree(sftr_pl);
	return err;
}

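/* Usage sketch (illustrative): callers enable both unicast and
 * broadcast/multicast flooding for an index range with
 *
 *	__mlxsw_sp_port_flood_set(port, idx_begin, idx_end, true, false);
 *
 * whereas only_uc=true updates just the MLXSW_SP_FLOOD_TABLE_UC table and
 * skips the MLXSW_SP_FLOOD_TABLE_BM write above.
 */
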
static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
			     bool set, bool only_uc)
{
	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 only_uc);
}

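/* Worked example (illustrative): because vFID indices are relative to the
 * start of the vFID range, a vPort with vfid 5 programs entry 5 of the
 * FID-based flood table, rather than an offset-based entry keyed by VLAN
 * as regular ports use in __mlxsw_sp_port_flood_set().
 */
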
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!list_empty(&mlxsw_sp_port->vports_list))
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

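/* Worked example (illustrative values): the loop above writes the VLAN
 * range in SPVM-sized chunks. If MLXSW_REG_SPVM_REC_MAX_COUNT were 255,
 * a request for VIDs 1..600 would take three transactions covering
 * 1..255, 256..510 and 511..600, with vid_e clamped to vid_end on the
 * final iteration.
 */
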
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid, old_pvid;
	enum mlxsw_reg_svfa_mt mt;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				goto err_port_vid_to_fid_set;
			}
		}
	}

	/* Set FID mapping according to port's mode */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d", vid);
			last_visited_vid = --vid;
			goto err_port_fid_map;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		goto err_port_flood_set;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
		mlxsw_sp_port->pvid = vid_begin;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_vid_to_fid_set:
	mlxsw_sp_fid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_flood_set:
	last_visited_vid = vid_end;
err_port_fid_map:
	for (vid = last_visited_vid; vid >= vid_begin; vid--)
		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
	return err;
}

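/* Note on ordering (summary of the flow above): FIDs are created and
 * mapped first, then flooding, then VLAN membership, then PVID, and the
 * STP state is re-applied only after the active_vlans bitmap is updated,
 * since mlxsw_sp_port_stp_state_set() walks that bitmap. The error labels
 * unwind the steps in exactly the reverse order.
 */
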
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 vid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 vid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->vid = vid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

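/* Lifecycle sketch (illustrative): a MID is allocated on the first
 * mlxsw_sp_port_mdb_add() for an (addr, vid) pair, its ref_count is
 * bumped for each joining port, and __mlxsw_sp_mc_dec_ref() returning 1
 * signals that the last reference was dropped, at which point the caller
 * also removes the multicast record from the SFD.
 */
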
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
		/* Default VLAN is always 1 */
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
		mlxsw_sp_port->pvid = 1;
	}

	if (init)
		goto out;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d", vid);
			return err;
		}
	}

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 vport_vid = 0, vport_fid = 0;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 tmp;

		tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
		vport_fid = mlxsw_sp_vfid_to_fid(tmp);
		vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid != fid)
						continue;
					else if (vport_fid)
						fdb->vid = vport_vid;
					else
						fdb->vid = fid;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				if (mlxsw_sp_port ==
				    mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
					if (vport_fid && vport_fid != fid)
						continue;
					else if (vport_fid)
						fdb->vid = vport_vid;
					else
						fdb->vid = fid;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

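/* Design note: the dump loop keeps issuing SFD queries until fewer than
 * MLXSW_REG_SFD_REC_MAX_COUNT records come back, even after a callback
 * error (stored_err), because the dump session in firmware must be driven
 * to completion before the lock is released.
 */
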
static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
					bool adding, char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning && learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

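/* Illustrative mapping: a learned record on a port with both learning and
 * learning_sync enabled becomes a SWITCHDEV_FDB_ADD notification, and an
 * aged-out record a SWITCHDEV_FDB_DEL one; this is how the bridge's
 * software FDB is kept in sync with the hardware FDB.
 */
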
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
				    mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		lag_vid = vid;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
				    mlxsw_sp_port->learning_sync,
				    adding, mac, vid,
				    mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	mutex_lock(&mlxsw_sp->fdb_lock);
	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);
	mutex_unlock(&mlxsw_sp->fdb_lock);

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

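/* Design note: FDB notifications are polled from a delayed work item
 * rather than delivered by interrupt; each pass drains SFN records under
 * fdb_lock and then re-arms itself via
 * mlxsw_sp_fdb_notify_work_schedule(), so fdb_notify.interval bounds the
 * latency of learning-sync updates.
 */
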
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	mutex_init(&mlxsw_sp->fdb_lock);
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
					true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}