/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
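/* switchdev attr_get op: reports the switch ID (taken from the device's
 * base MAC) and the currently enabled bridge port flags.
 */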
static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
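/* Translate a bridge port STP state to the Spectrum per-{port, VID} state
 * and program it via the SPMS register for every active VLAN on the port.
 */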
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}
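/* Add the port to (or remove it from) the flooding tables of FIDs
 * fid_begin..fid_end using the SFTR register. When only_uc is set, only
 * the unicast table is touched; otherwise the broadcast/multicast table
 * is updated as well.
 */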
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 fid_begin, u16 fid_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 range = fid_end - fid_begin + 1;
	char *sftr_pl;
	int err;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
			    mlxsw_sp_port->local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
			    mlxsw_sp_port->local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
	kfree(sftr_pl);
	return err;
}
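/* Toggle unicast flooding on every active VLAN of the port, undoing the
 * VLANs already programmed if a later one fails.
 */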
static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}
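/* The SFDAT register holds the FDB ageing time; the cached value is only
 * updated once the device has accepted the new setting.
 */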
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}
static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
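/* A port with vPorts (nr_vfids != 0) runs in Virtual mode and uses
 * {Port, VID} to FID mappings; otherwise global VID to FID mappings are
 * used.
 */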
static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (mlxsw_sp_port->nr_vfids)
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}
static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!mlxsw_sp_port->nr_vfids)
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}
static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}
static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
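/* Core VLAN addition sequence for a bridged port: create missing FIDs and
 * their VID to FID mappings, map the FIDs according to the port's mode,
 * enable flooding, set VLAN membership, optionally update the PVID, mark
 * the VLANs as active and finally re-apply the port's STP state. The error
 * labels below unwind these steps in reverse order.
 */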
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid, old_pvid;
	enum mlxsw_reg_svfa_mt mt;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				goto err_port_vid_to_fid_set;
			}
		}
	}

	/* Set FID mapping according to port's mode */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d", vid);
			last_visited_vid = --vid;
			goto err_port_fid_map;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		goto err_port_flood_set;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
		mlxsw_sp_port->pvid = vid_begin;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		set_bit(vid, mlxsw_sp_port->active_vlans);

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_vid_to_fid_set:
	mlxsw_sp_fid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_flood_set:
	last_visited_vid = vid_end;
err_port_fid_map:
	for (vid = last_visited_vid; vid >= vid_begin; vid--)
		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
	return err;
}
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 untagged_flag, pvid_flag);
}
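/* Write (or remove) a unicast FDB record via the SFD register. A zero VID
 * is replaced with the port's PVID; 'dynamic' selects between the dynamic
 * and static record policies.
 */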
static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port,
				const char *mac, u16 vid, bool adding,
				bool dynamic)
{
	enum mlxsw_reg_sfd_rec_policy policy;
	enum mlxsw_reg_sfd_op op;
	char *sfd_pl;
	int err;

	if (!vid)
		vid = mlxsw_sp_port->pvid;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			   MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
	op = adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
		      MLXSW_REG_SFD_OP_WRITE_REMOVE;
	mlxsw_reg_sfd_pack(sfd_pl, op, 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy,
			      mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      mlxsw_sp_port->local_port);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd),
			      sfd_pl);
	kfree(sfd_pl);

	return err;
}
static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
				    true, false);
}
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
		/* Default VLAN is always 1 */
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
		mlxsw_sp_port->pvid = 1;
	}

	if (init)
		goto out;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d", vid);
			return err;
		}
	}

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}
static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
				    false, false);
}
static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
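/* Dump the device's FDB via repeated SFD queries. The loop must always run
 * to the last record batch so that the dump session in firmware is
 * properly terminated, even after a callback error.
 */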
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb)
{
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 vid;
	u8 local_port;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core,
				      MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					fdb->vid = vid;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}
static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}
static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};
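/* Process a single learned/aged-out MAC notification: mirror it into the
 * device's FDB and, if learning_sync is enabled, notify the bridge through
 * the switchdev notifier chain.
 */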
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		return;
	}

	err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid,
				   adding && mlxsw_sp_port->learning, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) {
		struct switchdev_notifier_fdb_info info;
		unsigned long notifier_type;

		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev,
					 &info.info);
	}
}
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	}
}
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}
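/* Delayed work that polls FDB notifications (learned/aged-out MACs) from
 * the device via the SFN register and re-arms itself at the configured
 * interval.
 */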
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}
static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}