/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

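/* When a switchdev operation is targeted at a VLAN upper of the port,
 * resolve it to the matching vPort so that the rest of the code operates
 * on the correct 802.1D context.
 */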
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

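/* Program the port's STP state into the device via the SPMS register,
 * either for the vPort's single VID or for every active VLAN on the port.
 */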
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
{
	return vfid >= MLXSW_SP_VFID_PORT_MAX;
}

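/* Configure flooding for a range of (v)FIDs through the SFTR register.
 * vPorts use the FID table type directly, whereas bridged ports use the
 * FID-offset table type.
 */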
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		if (mlxsw_sp_vfid_is_vport_br(idx_begin))
			local_port = mlxsw_sp_port->local_port;
		else
			local_port = MLXSW_PORT_CPU_PORT;
	} else {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
	}

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
	kfree(sftr_pl);
	return err;
}

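/* Toggle unknown-unicast flooding, either for the vFID of a vPort or for
 * each active VLAN of a bridged port.
 */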
static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
			     bool set, bool only_uc)
{
	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 only_uc);
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!list_empty(&mlxsw_sp_port->vports_list))
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

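/* Core VLAN addition path: create and map FIDs, enable flooding, set VLAN
 * membership and PVID, and finally re-apply the STP state. Each step is
 * unwound in reverse order on failure.
 */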
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid, old_pvid;
	enum mlxsw_reg_svfa_mt mt;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				goto err_port_vid_to_fid_set;
			}
		}
	}

	/* Set FID mapping according to port's mode */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d", vid);
			last_visited_vid = --vid;
			goto err_port_fid_map;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		goto err_port_flood_set;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
		mlxsw_sp_port->pvid = vid_begin;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_vid_to_fid_set:
	mlxsw_sp_fid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_flood_set:
	last_visited_vid = vid_end;
err_port_fid_map:
	for (vid = last_visited_vid; vid >= vid_begin; vid--)
		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

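/* Add or remove a unicast FDB record for a local port using the SFD
 * register.
 */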
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp_port *mlxsw_sp_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      mlxsw_sp_port->local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = fdb->vid;
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		fid = mlxsw_sp_vfid_to_fid(vfid);
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}

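/* Core VLAN removal path: drop VLAN membership, restore the default PVID
 * if needed, clear flooding and FID mappings, then clear the activity bits.
 */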
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
		/* Default VLAN is always 1 */
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
		mlxsw_sp_port->pvid = 1;
	}

	if (init)
		goto out;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d", vid);
			return err;
		}
	}

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = fdb->vid;
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		fid = mlxsw_sp_vfid_to_fid(vfid);
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}

	return NULL;
}

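/* Dump the hardware FDB through an SFD query session. The dump must run to
 * completion even if the caller's callback fails, so the first callback
 * error is stored and only reported once the session is finished.
 */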
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 vport_vid = 0, vport_fid = 0;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 tmp;

		tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
		vport_fid = mlxsw_sp_vfid_to_fid(tmp);
		vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid != fid)
						continue;
					else if (vport_fid)
						fdb->vid = vport_vid;
					else
						fdb->vid = fid;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				if (mlxsw_sp_port ==
				    mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
					if (vport_fid && vport_fid != fid)
						continue;
					else if (vport_fid)
						fdb->vid = vport_vid;
					else
						fdb->vid = fid;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

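/* Notify the bridge about learned / aged-out entries, but only when both
 * learning and learning sync are enabled on the port.
 */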
static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
					bool adding, char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning && learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

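/* Process a single learned / aged-out MAC notification: mirror the entry
 * into the FDB (respecting the port's learning setting) and sync it to the
 * bridge.
 */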
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		return;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			return;
		}

		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port, mac, fid,
				      adding && mlxsw_sp_port->learning, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
				    mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		return;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			return;
		}

		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		lag_vid = vid;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding && mlxsw_sp_port->learning,
					  true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
				    mlxsw_sp_port->learning_sync,
				    adding, mac, vid,
				    mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

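/* Delayed work that polls the device for FDB notifications (SFN register)
 * and re-arms itself according to the configured learning interval.
 */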
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
	} while (num_rec);

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

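/* Called during port initialization; leaves the port with PVID 1 only and
 * an implicit VLAN interface so that untagged packets are classified to
 * the default vFID.
 */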
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
					true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}