/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

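/* switchdev hands us the netdevice an operation was originally targeted
 * at via orig_dev. When that device is a VLAN upper of one of our ports,
 * the helper below resolves it to the matching vPort, the per-{port, VID}
 * representation this driver uses; otherwise the physical port is used
 * as-is.
 */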
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
                       struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp_port *mlxsw_sp_vport;
        u16 vid;

        if (!is_vlan_dev(dev))
                return mlxsw_sp_port;

        vid = vlan_dev_vlan_id(dev);
        mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
        WARN_ON(!mlxsw_sp_vport);

        return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
                                  struct switchdev_attr *attr)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
        if (!mlxsw_sp_port)
                return -EINVAL;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
                attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
                memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
                       attr->u.ppid.id_len);
                break;
        case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
                attr->u.brport_flags =
                        (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
                        (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
                        (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

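/* The SPMS register holds the spanning tree state of a {port, VID} pair.
 * The bridge's port states collapse onto the three states the device
 * implements, roughly:
 *
 *   BR_STATE_DISABLED, BR_STATE_FORWARDING -> SPMS FORWARDING
 *   BR_STATE_LISTENING, BR_STATE_LEARNING  -> SPMS LEARNING
 *   BR_STATE_BLOCKING                      -> SPMS DISCARDING
 *
 * A disabled port does not forward anyway, as it is administratively
 * down, which is why it can share the FORWARDING state.
 */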
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                       u8 state)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        enum mlxsw_reg_spms_state spms_state;
        char *spms_pl;
        u16 vid;
        int err;

        switch (state) {
        case BR_STATE_DISABLED: /* fall-through */
        case BR_STATE_FORWARDING:
                spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
                break;
        case BR_STATE_LISTENING: /* fall-through */
        case BR_STATE_LEARNING:
                spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
                break;
        case BR_STATE_BLOCKING:
                spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
                break;
        default:
                BUG();
        }

        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
        if (!spms_pl)
                return -ENOMEM;
        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
                mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
        } else {
                for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
                        mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
        }

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
        kfree(spms_pl);
        return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                            struct switchdev_trans *trans,
                                            u8 state)
{
        if (switchdev_trans_ph_prepare(trans))
                return 0;

        mlxsw_sp_port->stp_state = state;
        return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
{
        return vfid >= MLXSW_SP_VFID_PORT_MAX;
}

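/* Flood table configuration (SFTR), a rough summary: a regular port uses
 * the FID-offset table type and can program a whole range of FIDs in one
 * go, while a vPort uses the FID table type with single entries. For
 * vPorts, only members of a vPort bridge (vFIDs above
 * MLXSW_SP_VFID_PORT_MAX) flood through the port itself; other vFIDs
 * flood to the CPU port, so traffic is handed to the kernel. Two tables
 * exist, one for unknown unicast (MLXSW_SP_FLOOD_TABLE_UC) and one for
 * broadcast and unregistered multicast (MLXSW_SP_FLOOD_TABLE_BM);
 * only_uc restricts the update to the former.
 */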
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 idx_begin, u16 idx_end, bool set,
                                     bool only_uc)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 local_port = mlxsw_sp_port->local_port;
        enum mlxsw_flood_table_type table_type;
        u16 range = idx_end - idx_begin + 1;
        char *sftr_pl;
        int err;

        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
                if (mlxsw_sp_vfid_is_vport_br(idx_begin))
                        local_port = mlxsw_sp_port->local_port;
                else
                        local_port = MLXSW_PORT_CPU_PORT;
        } else {
                table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
        }

        sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
        if (!sftr_pl)
                return -ENOMEM;

        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
                            table_type, range, local_port, set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
        if (err)
                goto buffer_out;

        /* Flooding control allows one to decide whether a given port will
         * flood unicast traffic for which there is no FDB entry.
         */
        if (only_uc)
                goto buffer_out;

        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
                            table_type, range, local_port, set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
        kfree(sftr_pl);
        return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      bool set)
{
        struct net_device *dev = mlxsw_sp_port->dev;
        u16 vid, last_visited_vid;
        int err;

        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

                return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
                                                 set, true);
        }

        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
                err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
                                                true);
                if (err) {
                        last_visited_vid = vid;
                        goto err_port_flood_set;
                }
        }

        return 0;

err_port_flood_set:
        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
                __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
        netdev_err(dev, "Failed to configure unicast flooding\n");
        return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
                             bool set, bool only_uc)
{
        /* In case of vFIDs, index into the flooding table is relative to
         * the start of the vFIDs range.
         */
        return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
                                         only_uc);
}

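/* Bridge port flags. Learning and learning sync are software state
 * consumed by the FDB notification handlers below, so only a BR_FLOOD
 * change requires touching the device, and only when the flag actually
 * toggles.
 */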
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                           struct switchdev_trans *trans,
                                           unsigned long brport_flags)
{
        unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
        bool set;
        int err;

        if (switchdev_trans_ph_prepare(trans))
                return 0;

        if ((uc_flood ^ brport_flags) & BR_FLOOD) {
                set = mlxsw_sp_port->uc_flood ? false : true;
                err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
                if (err)
                        return err;
        }

        mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
        mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
        mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

        return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
        char sfdat_pl[MLXSW_REG_SFDAT_LEN];
        int err;

        mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
        if (err)
                return err;
        mlxsw_sp->ageing_time = ageing_time;
        return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                            struct switchdev_trans *trans,
                                            unsigned long ageing_clock_t)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
        u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

        if (switchdev_trans_ph_prepare(trans))
                return 0;

        return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

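/* switchdev attribute and object operations are transactional: they run
 * once in a prepare phase, where a driver may fail or preallocate
 * resources, and once in a commit phase, which must not fail. The setters
 * above do all of their work in the commit phase, which is why they
 * return early on switchdev_trans_ph_prepare().
 */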
static int mlxsw_sp_port_attr_set(struct net_device *dev,
                                  const struct switchdev_attr *attr,
                                  struct switchdev_trans *trans)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        int err = 0;

        mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
        if (!mlxsw_sp_port)
                return -EINVAL;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
                err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
                                                       attr->u.stp_state);
                break;
        case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
                err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
                                                      attr->u.brport_flags);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
                err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
                                                       attr->u.ageing_time);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spvid_pl[MLXSW_REG_SPVID_LEN];

        mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
        char sfmr_pl[MLXSW_REG_SFMR_LEN];
        int err;

        mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);

        if (err)
                return err;

        set_bit(fid, mlxsw_sp->active_fids);
        return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
        char sfmr_pl[MLXSW_REG_SFMR_LEN];

        clear_bit(fid, mlxsw_sp->active_fids);

        mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
                            fid, fid);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

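/* VID to FID mapping (SVFA). A port that has vPorts hanging off of it
 * operates in Virtual mode and uses per-{port, VID} mappings; otherwise
 * the global VID to FID mapping applies. The identical fid/fid arguments
 * reflect the VLAN-aware bridge case, where VID N is always backed by
 * FID N.
 */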
static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
        enum mlxsw_reg_svfa_mt mt;

        if (!list_empty(&mlxsw_sp_port->vports_list))
                mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
        else
                mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

        return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
        enum mlxsw_reg_svfa_mt mt;

        if (list_empty(&mlxsw_sp_port->vports_list))
                return 0;

        mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
        return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
                                  u16 vid_end)
{
        u16 vid;
        int err;

        for (vid = vid_begin; vid <= vid_end; vid++) {
                err = mlxsw_sp_port_add_vid(dev, 0, vid);
                if (err)
                        goto err_port_add_vid;
        }
        return 0;

err_port_add_vid:
        for (vid--; vid >= vid_begin; vid--)
                mlxsw_sp_port_kill_vid(dev, 0, vid);
        return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 vid_begin, u16 vid_end, bool is_member,
                                     bool untagged)
{
        u16 vid, vid_e;
        int err;

        for (vid = vid_begin; vid <= vid_end;
             vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
                vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
                            vid_end);

                err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
                                             is_member, untagged);
                if (err)
                        return err;
        }

        return 0;
}

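/* Adding VLANs to a bridged port is a multi-step sequence: make sure a
 * FID exists for every VID, map the FIDs to the port, enable flooding,
 * make the port a member of the VLANs, optionally update the PVID and
 * finally replay the STP state for the newly active VLANs. Each step
 * unwinds the previous ones on failure.
 */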
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 vid_begin, u16 vid_end,
                                     bool flag_untagged, bool flag_pvid)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct net_device *dev = mlxsw_sp_port->dev;
        u16 vid, last_visited_vid, old_pvid;
        enum mlxsw_reg_svfa_mt mt;
        int err;

        /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
         * not bridged, then packets ingressing through the port with
         * the specified VIDs will be directed to CPU.
         */
        if (!mlxsw_sp_port->bridged)
                return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

        for (vid = vid_begin; vid <= vid_end; vid++) {
                if (!test_bit(vid, mlxsw_sp->active_fids)) {
                        err = mlxsw_sp_fid_create(mlxsw_sp, vid);
                        if (err) {
                                netdev_err(dev, "Failed to create FID=%d\n",
                                           vid);
                                return err;
                        }

                        /* When creating a FID, we set a VID to FID mapping
                         * regardless of the port's mode.
                         */
                        mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
                        err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
                                                           true, vid, vid);
                        if (err) {
                                netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
                                           vid);
                                goto err_port_vid_to_fid_set;
                        }
                }
        }

        /* Set FID mapping according to port's mode */
        for (vid = vid_begin; vid <= vid_end; vid++) {
                err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
                if (err) {
                        netdev_err(dev, "Failed to map FID=%d", vid);
                        last_visited_vid = --vid;
                        goto err_port_fid_map;
                }
        }

        err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
                                        true, false);
        if (err) {
                netdev_err(dev, "Failed to configure flooding\n");
                goto err_port_flood_set;
        }

        err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
                                        true, flag_untagged);
        if (err) {
                netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
                           vid_end);
                goto err_port_vlans_set;
        }

        old_pvid = mlxsw_sp_port->pvid;
        if (flag_pvid && old_pvid != vid_begin) {
                err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
                if (err) {
                        netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
                        goto err_port_pvid_set;
                }
                mlxsw_sp_port->pvid = vid_begin;
        }

        /* Changing activity bits only if HW operation succeeded */
        for (vid = vid_begin; vid <= vid_end; vid++)
                set_bit(vid, mlxsw_sp_port->active_vlans);

        /* STP state change must be done after we set active VLANs */
        err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
                                          mlxsw_sp_port->stp_state);
        if (err) {
                netdev_err(dev, "Failed to set STP state\n");
                goto err_port_stp_state_set;
        }

        return 0;

err_port_vid_to_fid_set:
        mlxsw_sp_fid_destroy(mlxsw_sp, vid);
        return err;

err_port_stp_state_set:
        for (vid = vid_begin; vid <= vid_end; vid++)
                clear_bit(vid, mlxsw_sp_port->active_vlans);
        if (old_pvid != mlxsw_sp_port->pvid)
                mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
        __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
                                  false);
err_port_vlans_set:
        __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
                                  false);
err_port_flood_set:
        last_visited_vid = vid_end;
err_port_fid_map:
        for (vid = last_visited_vid; vid >= vid_begin; vid--)
                mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
        return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
                                   const struct switchdev_obj_port_vlan *vlan,
                                   struct switchdev_trans *trans)
{
        bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;

        if (switchdev_trans_ph_prepare(trans))
                return 0;

        return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
                                         vlan->vid_begin, vlan->vid_end,
                                         untagged_flag, pvid_flag);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
        return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
                         MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
        return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
                        MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

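/* FDB editing (SFD). The same register adds and removes unicast entries,
 * keyed by {MAC, FID} and pointing at either a local port or a LAG.
 * Dynamic entries are subject to ageing in the device, static ones are
 * not.
 */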
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp_port *mlxsw_sp_port,
                                   const char *mac, u16 fid, bool adding,
                                   bool dynamic)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *sfd_pl;
        int err;

        sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
        if (!sfd_pl)
                return -ENOMEM;

        mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
        mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
                              mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
                              mlxsw_sp_port->local_port);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
        kfree(sfd_pl);

        return err;
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
                                       const char *mac, u16 fid, u16 lag_vid,
                                       bool adding, bool dynamic)
{
        char *sfd_pl;
        int err;

        sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
        if (!sfd_pl)
                return -ENOMEM;

        mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
        mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
                                  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
                                  lag_vid, lag_id);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
        kfree(sfd_pl);

        return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
                             const struct switchdev_obj_port_fdb *fdb,
                             struct switchdev_trans *trans)
{
        u16 fid = fdb->vid;
        u16 lag_vid = 0;

        if (switchdev_trans_ph_prepare(trans))
                return 0;

        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

                fid = mlxsw_sp_vfid_to_fid(vfid);
                lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
        }

        if (!fid)
                fid = mlxsw_sp_port->pvid;

        if (!mlxsw_sp_port->lagged)
                return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
                                               fdb->addr, fid, true, false);
        else
                return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
                                                   mlxsw_sp_port->lag_id,
                                                   fdb->addr, fid, lag_vid,
                                                   true, false);
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
                                 const struct switchdev_obj *obj,
                                 struct switchdev_trans *trans)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        int err = 0;

        mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
        if (!mlxsw_sp_port)
                return -EINVAL;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
                        return 0;

                err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
                                              SWITCHDEV_OBJ_PORT_VLAN(obj),
                                              trans);
                break;
        case SWITCHDEV_OBJ_ID_PORT_FDB:
                err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
                                                   SWITCHDEV_OBJ_PORT_FDB(obj),
                                                   trans);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
                                   u16 vid_end)
{
        u16 vid;
        int err;

        for (vid = vid_begin; vid <= vid_end; vid++) {
                err = mlxsw_sp_port_kill_vid(dev, 0, vid);
                if (err)
                        return err;
        }

        return 0;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 vid_begin, u16 vid_end, bool init)
{
        struct net_device *dev = mlxsw_sp_port->dev;
        u16 vid, pvid;
        int err;

        /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
         * not bridged, then prevent packets ingressing through the
         * port with the specified VIDs from being trapped to CPU.
         */
        if (!init && !mlxsw_sp_port->bridged)
                return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

        err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
                                        false, false);
        if (err) {
                netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
                           vid_end);
                return err;
        }

        pvid = mlxsw_sp_port->pvid;
        if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
                /* Default VLAN is always 1 */
                err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
                if (err) {
                        netdev_err(dev, "Unable to del PVID %d\n", pvid);
                        return err;
                }
                mlxsw_sp_port->pvid = 1;
        }

        if (init)
                goto out;

        err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
                                        false, false);
        if (err) {
                netdev_err(dev, "Failed to clear flooding\n");
                return err;
        }

        for (vid = vid_begin; vid <= vid_end; vid++) {
                /* Remove FID mapping in case of Virtual mode */
                err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
                if (err) {
                        netdev_err(dev, "Failed to unmap FID=%d", vid);
                        return err;
                }
        }

out:
        /* Changing activity bits only if HW operation succeeded */
        for (vid = vid_begin; vid <= vid_end; vid++)
                clear_bit(vid, mlxsw_sp_port->active_vlans);

        return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
                                   const struct switchdev_obj_port_vlan *vlan)
{
        return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
                                         vlan->vid_begin, vlan->vid_end, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
                             const struct switchdev_obj_port_fdb *fdb)
{
        u16 fid = fdb->vid;
        u16 lag_vid = 0;

        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

                fid = mlxsw_sp_vfid_to_fid(vfid);
                lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
        }

        if (!mlxsw_sp_port->lagged)
                return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
                                               fdb->addr, fid,
                                               false, false);
        else
                return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
                                                   mlxsw_sp_port->lag_id,
                                                   fdb->addr, fid, lag_vid,
                                                   false, false);
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
                                 const struct switchdev_obj *obj)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        int err = 0;

        mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
        if (!mlxsw_sp_port)
                return -EINVAL;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
                        return 0;

                err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
                                              SWITCHDEV_OBJ_PORT_VLAN(obj));
                break;
        case SWITCHDEV_OBJ_ID_PORT_FDB:
                err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
                                                   SWITCHDEV_OBJ_PORT_FDB(obj));
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
                                                   u16 lag_id)
{
        struct mlxsw_sp_port *mlxsw_sp_port;
        int i;

        for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
                mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
                if (mlxsw_sp_port)
                        return mlxsw_sp_port;
        }
        return NULL;
}

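/* FDB dump. The device iterates over its table in firmware, so once a
 * dump session is opened it has to be run to completion even if one of
 * the callbacks fails; the first callback error is stored and only
 * returned after the session is drained.
 */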
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
                                  struct switchdev_obj_port_fdb *fdb,
                                  switchdev_obj_dump_cb_t *cb)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 vport_vid = 0, vport_fid = 0;
        char *sfd_pl;
        char mac[ETH_ALEN];
        u16 fid;
        u8 local_port;
        u16 lag_id;
        u8 num_rec;
        int stored_err = 0;
        int i;
        int err;

        sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
        if (!sfd_pl)
                return -ENOMEM;

        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                u16 tmp;

                tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
                vport_fid = mlxsw_sp_vfid_to_fid(tmp);
                vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
        }

        mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
        do {
                mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
                err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
                if (err)
                        goto out;

                num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

                /* Even in case of error, we have to run the dump to the end
                 * so the session in firmware is finished.
                 */
                if (stored_err)
                        continue;

                for (i = 0; i < num_rec; i++) {
                        switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
                        case MLXSW_REG_SFD_REC_TYPE_UNICAST:
                                mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
                                                        &local_port);
                                if (local_port == mlxsw_sp_port->local_port) {
                                        if (vport_fid && vport_fid != fid)
                                                continue;
                                        else if (vport_fid)
                                                fdb->vid = vport_vid;
                                        else
                                                fdb->vid = fid;
                                        ether_addr_copy(fdb->addr, mac);
                                        fdb->ndm_state = NUD_REACHABLE;
                                        err = cb(&fdb->obj);
                                        if (err)
                                                stored_err = err;
                                }
                                break;
                        case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
                                mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
                                                            mac, &fid, &lag_id);
                                if (mlxsw_sp_port ==
                                    mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
                                        if (vport_fid && vport_fid != fid)
                                                continue;
                                        else if (vport_fid)
                                                fdb->vid = vport_vid;
                                        else
                                                fdb->vid = fid;
                                        ether_addr_copy(fdb->addr, mac);
                                        fdb->ndm_state = NUD_REACHABLE;
                                        err = cb(&fdb->obj);
                                        if (err)
                                                stored_err = err;
                                }
                                break;
                        }
                }
        } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
        kfree(sfd_pl);
        return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
                                   struct switchdev_obj_port_vlan *vlan,
                                   switchdev_obj_dump_cb_t *cb)
{
        u16 vid;
        int err = 0;

        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                vlan->flags = 0;
                vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
                vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
                return cb(&vlan->obj);
        }

        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
                vlan->flags = 0;
                if (vid == mlxsw_sp_port->pvid)
                        vlan->flags |= BRIDGE_VLAN_INFO_PVID;
                vlan->vid_begin = vid;
                vlan->vid_end = vid;
                err = cb(&vlan->obj);
                if (err)
                        break;
        }
        return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
                                  struct switchdev_obj *obj,
                                  switchdev_obj_dump_cb_t *cb)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        int err = 0;

        mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
        if (!mlxsw_sp_port)
                return -EINVAL;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
                                              SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
                break;
        case SWITCHDEV_OBJ_ID_PORT_FDB:
                err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
                                             SWITCHDEV_OBJ_PORT_FDB(obj), cb);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
        .switchdev_port_attr_get        = mlxsw_sp_port_attr_get,
        .switchdev_port_attr_set        = mlxsw_sp_port_attr_set,
        .switchdev_port_obj_add         = mlxsw_sp_port_obj_add,
        .switchdev_port_obj_del         = mlxsw_sp_port_obj_del,
        .switchdev_port_obj_dump        = mlxsw_sp_port_obj_dump,
};

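/* With the ops above wired up, the standard bridge tooling drives this
 * driver. A rough usage sketch (the port name sw1p1 is just an example):
 *
 *   ip link add name br0 type bridge
 *   ip link set dev sw1p1 master br0
 *   bridge vlan add vid 10 pvid untagged dev sw1p1
 *   bridge fdb add 52:54:00:12:34:56 dev sw1p1 self static
 *
 * The VLAN of the third command reaches mlxsw_sp_port_obj_add() as a
 * SWITCHDEV_OBJ_ID_PORT_VLAN object, the FDB entry of the fourth as a
 * SWITCHDEV_OBJ_ID_PORT_FDB object, and bridge attribute changes such as
 * STP state and ageing time come in through mlxsw_sp_port_attr_set().
 */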
static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
                                        bool adding, char *mac, u16 vid,
                                        struct net_device *dev)
{
        struct switchdev_notifier_fdb_info info;
        unsigned long notifier_type;

        if (learning && learning_sync) {
                info.addr = mac;
                info.vid = vid;
                notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
                call_switchdev_notifiers(notifier_type, dev, &info.info);
        }
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
                                            char *sfn_pl, int rec_index,
                                            bool adding)
{
        struct mlxsw_sp_port *mlxsw_sp_port;
        char mac[ETH_ALEN];
        u8 local_port;
        u16 vid, fid;
        int err;

        mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
                dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
                return;
        }

        if (mlxsw_sp_fid_is_vfid(fid)) {
                u16 vfid = mlxsw_sp_fid_to_vfid(fid);
                struct mlxsw_sp_port *mlxsw_sp_vport;

                mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
                                                                  vfid);
                if (!mlxsw_sp_vport) {
                        netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
                        return;
                }

                vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
                /* Override the physical port with the vPort. */
                mlxsw_sp_port = mlxsw_sp_vport;
        } else {
                vid = fid;
        }

        err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port, mac, fid,
                                      adding && mlxsw_sp_port->learning, true);
        if (err) {
                if (net_ratelimit())
                        netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
                return;
        }

        mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
                                    mlxsw_sp_port->learning_sync,
                                    adding, mac, vid, mlxsw_sp_port->dev);
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
                                                char *sfn_pl, int rec_index,
                                                bool adding)
{
        struct mlxsw_sp_port *mlxsw_sp_port;
        char mac[ETH_ALEN];
        u16 lag_vid = 0;
        u16 lag_id;
        u16 vid, fid;
        int err;

        mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
        mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
        if (!mlxsw_sp_port) {
                dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
                return;
        }

        if (mlxsw_sp_fid_is_vfid(fid)) {
                u16 vfid = mlxsw_sp_fid_to_vfid(fid);
                struct mlxsw_sp_port *mlxsw_sp_vport;

                mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
                                                                  vfid);
                if (!mlxsw_sp_vport) {
                        netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
                        return;
                }

                vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
                lag_vid = vid;
                /* Override the physical port with the vPort. */
                mlxsw_sp_port = mlxsw_sp_vport;
        } else {
                vid = fid;
        }

        err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
                                          adding && mlxsw_sp_port->learning,
                                          true);
        if (err) {
                if (net_ratelimit())
                        netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
                return;
        }

        mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
                                    mlxsw_sp_port->learning_sync,
                                    adding, mac, vid,
                                    mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
                                            char *sfn_pl, int rec_index)
{
        switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
        case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
                mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
                                                rec_index, true);
                break;
        case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
                mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
                                                rec_index, false);
                break;
        case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
                mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
                                                    rec_index, true);
                break;
        case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
                mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
                                                    rec_index, false);
                break;
        }
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
        schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
                              msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

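/* Learning is not interrupt driven; a delayed work item polls the SFN
 * register every fdb_notify.interval milliseconds and feeds each
 * learned / aged-out record through the handlers above before
 * rescheduling itself.
 */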
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
        struct mlxsw_sp *mlxsw_sp;
        char *sfn_pl;
        u8 num_rec;
        int i;
        int err;

        sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
        if (!sfn_pl)
                return;

        mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

        do {
                mlxsw_reg_sfn_pack(sfn_pl);
                err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
                if (err) {
                        dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
                        break;
                }
                num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
                for (i = 0; i < num_rec; i++)
                        mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

        } while (num_rec);

        kfree(sfn_pl);
        mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
        int err;

        err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
                return err;
        }
        INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
        mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
        mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
        return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
        cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
        u16 fid;

        for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
                mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
        return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
        mlxsw_sp_fdb_fini(mlxsw_sp);
        mlxsw_sp_fids_fini(mlxsw_sp);
}

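/* Per-port VLAN initialization. The port starts out VLAN-unaware: PVID
 * is 1, membership in all other VLANs is removed, and VID 1 is added so
 * that untagged traffic is classified to the default vFID and handed to
 * the kernel.
 */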
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct net_device *dev = mlxsw_sp_port->dev;
        int err;

        /* Allow only untagged packets to ingress and tag them internally
         * with VID 1.
         */
        mlxsw_sp_port->pvid = 1;
        err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true);
        if (err) {
                netdev_err(dev, "Unable to init VLANs\n");
                return err;
        }

        /* Add implicit VLAN interface in the device, so that untagged
         * packets will be classified to the default vFID.
         */
        err = mlxsw_sp_port_add_vid(dev, 0, 1);
        if (err)
                netdev_err(dev, "Failed to configure default vFID\n");

        return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}
*mlxsw_sp_port
)