/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

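/* switchdev calls can target a VLAN upper of a port netdev. In that case
 * the operation applies to the vPort representing the {port, VID} pair,
 * so resolve orig_dev to the matching vPort before acting on it.
 */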
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

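/* The bridge's per-port STP states are collapsed onto the three states the
 * SPMS register supports: disabled/forwarding map to forwarding,
 * listening/learning map to learning, and blocking maps to discarding.
 * The state is written per VID, either for the vPort's single VID or for
 * all active VLANs of a physical port.
 */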
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
{
	return vfid >= MLXSW_SP_VFID_PORT_MAX;
}

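/* Configure flooding for a range of FIDs (or vFIDs) via the SFTR register.
 * The first pass programs the unicast flood table; the second programs the
 * BM (broadcast/unregistered multicast) table and is skipped when only_uc
 * is set. For vPorts that are not members of a vPort bridge, flooded
 * traffic is directed to the CPU port instead of the port itself.
 */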
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		if (mlxsw_sp_vfid_is_vport_br(idx_begin))
			local_port = mlxsw_sp_port->local_port;
		else
			local_port = MLXSW_PORT_CPU_PORT;
	} else {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
	}

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
			     bool set, bool only_uc)
{
	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 only_uc);
}

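/* Sync bridge port flags (BR_FLOOD, BR_LEARNING, BR_LEARNING_SYNC) into the
 * port's state. Only a change in BR_FLOOD requires a device operation; the
 * learning flags merely gate FDB notification handling in software.
 */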
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

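/* Set the port's PVID through the SPVID register; untagged packets
 * ingressing through the port are classified to this VID.
 */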
static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);

	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!list_empty(&mlxsw_sp_port->vports_list))
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

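/* Core of VLAN addition on a bridged port: make sure a FID exists for each
 * VID and is mapped according to the port's mode, enable flooding and VLAN
 * membership for the range, optionally update the PVID, and finally
 * re-apply the current STP state to the newly active VLANs. Each step is
 * unwound in reverse order on failure.
 */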
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid, old_pvid;
	enum mlxsw_reg_svfa_mt mt;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				goto err_port_vid_to_fid_set;
			}
		}
	}

	/* Set FID mapping according to port's mode */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d", vid);
			last_visited_vid = --vid;
			goto err_port_fid_map;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		goto err_port_flood_set;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
		mlxsw_sp_port->pvid = vid_begin;
	}

	/* Change activity bits only if the HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_vid_to_fid_set:
	mlxsw_sp_fid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_flood_set:
	last_visited_vid = vid_end;
err_port_fid_map:
	for (vid = last_visited_vid; vid >= vid_begin; vid--)
		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

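/* Add or remove a unicast FDB record via the SFD register. Records learned
 * from hardware notifications are re-programmed as dynamic entries, while
 * user-added entries are static and not subject to ageing.
 */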
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp_port *mlxsw_sp_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      mlxsw_sp_port->local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = fdb->vid;
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		fid = mlxsw_sp_vfid_to_fid(vfid);
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
		/* Default VLAN is always 1 */
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
		mlxsw_sp_port->pvid = 1;
	}

	if (init)
		goto out;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d", vid);
			return err;
		}
	}

out:
	/* Change activity bits only if the HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = fdb->vid;
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		fid = mlxsw_sp_vfid_to_fid(vfid);
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

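/* Dump the device's FDB by iterating SFD query sessions until a partial
 * batch is returned. For a vPort, records are filtered to its vFID and
 * reported with the vPort's VID. Even if the callback fails, the remaining
 * batches are still read so the firmware dump session terminates cleanly.
 */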
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 vport_vid = 0, vport_fid = 0;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 tmp;

		tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
		vport_fid = mlxsw_sp_vfid_to_fid(tmp);
		vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid != fid)
						continue;
					else if (vport_fid)
						fdb->vid = vport_vid;
					else
						fdb->vid = fid;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				if (mlxsw_sp_port ==
				    mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
					if (vport_fid && vport_fid != fid)
						continue;
					else if (vport_fid)
						fdb->vid = vport_vid;
					else
						fdb->vid = fid;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
					bool adding, char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning && learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		return;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			return;
		}

		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port, mac, fid,
				      adding && mlxsw_sp_port->learning, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
				    mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		return;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			return;
		}

		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		lag_vid = vid;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding && mlxsw_sp_port->learning,
					  true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
				    mlxsw_sp_port->learning_sync,
				    adding, mac, vid,
				    mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

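/* Learned/aged-out MAC notifications are not interrupt driven; a delayed
 * work item polls the SFN register every fdb_notify.interval milliseconds
 * and re-arms itself after each pass.
 */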
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

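/* Called during port initialization: flush all VLANs so that only untagged
 * packets (internally tagged with VID 1) are allowed in, then add VID 1 so
 * untagged traffic is classified to the default vFID.
 */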
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
					true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}