1 /*
2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
3 * Copyright (c) 2008 Marvell Semiconductor
4 *
5 * Copyright (c) 2015 CMC Electronics, Inc.
6 * Added support for VLAN Table Unit operations
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14 #include <linux/delay.h>
15 #include <linux/etherdevice.h>
16 #include <linux/ethtool.h>
17 #include <linux/if_bridge.h>
18 #include <linux/jiffies.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/netdevice.h>
22 #include <linux/gpio/consumer.h>
23 #include <linux/phy.h>
24 #include <net/dsa.h>
25 #include <net/switchdev.h>
26 #include "mv88e6xxx.h"
27
28 static void assert_smi_lock(struct mv88e6xxx_priv_state *ps)
29 {
30 if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
31 dev_err(ps->dev, "SMI lock not held!\n");
32 dump_stack();
33 }
34 }
35
36 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
37 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
38 * will be directly accessible on some {device address,register address}
39 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
40 * will only respond to SMI transactions to that specific address, and
41 * an indirect addressing mechanism needs to be used to access its
42 * registers.
43 */
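/* Indirect accesses go through the SMI command/data register pair of the
 * one address the switch does respond to: poll SMI_CMD until SMI_CMD_BUSY
 * clears, write a command word encoding the operation plus the target
 * device address (bits 9:5) and register (bits 4:0), wait for completion,
 * then transfer the 16-bit value through SMI_DATA. The helpers below
 * implement exactly this sequence.
 */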
44 static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
45 {
46 int ret;
47 int i;
48
49 for (i = 0; i < 16; i++) {
50 ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
51 if (ret < 0)
52 return ret;
53
54 if ((ret & SMI_CMD_BUSY) == 0)
55 return 0;
56 }
57
58 return -ETIMEDOUT;
59 }
60
61 static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
62 int reg)
63 {
64 int ret;
65
66 if (sw_addr == 0)
67 return mdiobus_read_nested(bus, addr, reg);
68
69 /* Wait for the bus to become free. */
70 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
71 if (ret < 0)
72 return ret;
73
74 /* Transmit the read command. */
75 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
76 SMI_CMD_OP_22_READ | (addr << 5) | reg);
77 if (ret < 0)
78 return ret;
79
80 /* Wait for the read command to complete. */
81 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
82 if (ret < 0)
83 return ret;
84
85 /* Read the data. */
86 ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
87 if (ret < 0)
88 return ret;
89
90 return ret & 0xffff;
91 }
92
93 static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps,
94 int addr, int reg)
95 {
96 int ret;
97
98 assert_smi_lock(ps);
99
100 ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg);
101 if (ret < 0)
102 return ret;
103
104 dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
105 addr, reg, ret);
106
107 return ret;
108 }
109
110 int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg)
111 {
112 int ret;
113
114 mutex_lock(&ps->smi_mutex);
115 ret = _mv88e6xxx_reg_read(ps, addr, reg);
116 mutex_unlock(&ps->smi_mutex);
117
118 return ret;
119 }
120
121 static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
122 int reg, u16 val)
123 {
124 int ret;
125
126 if (sw_addr == 0)
127 return mdiobus_write_nested(bus, addr, reg, val);
128
129 /* Wait for the bus to become free. */
130 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
131 if (ret < 0)
132 return ret;
133
134 /* Transmit the data to write. */
135 ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
136 if (ret < 0)
137 return ret;
138
139 /* Transmit the write command. */
140 ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
141 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
142 if (ret < 0)
143 return ret;
144
145 /* Wait for the write command to complete. */
146 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
147 if (ret < 0)
148 return ret;
149
150 return 0;
151 }
152
153 static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
154 int reg, u16 val)
155 {
156 assert_smi_lock(ps);
157
158 dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
159 addr, reg, val);
160
161 return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val);
162 }
163
164 int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
165 int reg, u16 val)
166 {
167 int ret;
168
169 mutex_lock(&ps->smi_mutex);
170 ret = _mv88e6xxx_reg_write(ps, addr, reg, val);
171 mutex_unlock(&ps->smi_mutex);
172
173 return ret;
174 }
175
176 static int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
177 {
178 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
179 int err;
180
181 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01,
182 (addr[0] << 8) | addr[1]);
183 if (err)
184 return err;
185
186 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23,
187 (addr[2] << 8) | addr[3]);
188 if (err)
189 return err;
190
191 return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45,
192 (addr[4] << 8) | addr[5]);
193 }
194
195 static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
196 {
197 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
198 int ret;
199 int i;
200
201 for (i = 0; i < 6; i++) {
202 int j;
203
204 /* Write the MAC address byte. */
205 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
206 GLOBAL2_SWITCH_MAC_BUSY |
207 (i << 8) | addr[i]);
208 if (ret)
209 return ret;
210
211 /* Wait for the write to complete. */
212 for (j = 0; j < 16; j++) {
213 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2,
214 GLOBAL2_SWITCH_MAC);
215 if (ret < 0)
216 return ret;
217
218 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
219 break;
220 }
221 if (j == 16)
222 return -ETIMEDOUT;
223 }
224
225 return 0;
226 }
227
228 int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
229 {
230 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
231
232 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SWITCH_MAC))
233 return mv88e6xxx_set_addr_indirect(ds, addr);
234 else
235 return mv88e6xxx_set_addr_direct(ds, addr);
236 }
237
238 static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr,
239 int regnum)
240 {
241 if (addr >= 0)
242 return _mv88e6xxx_reg_read(ps, addr, regnum);
243 return 0xffff;
244 }
245
246 static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr,
247 int regnum, u16 val)
248 {
249 if (addr >= 0)
250 return _mv88e6xxx_reg_write(ps, addr, regnum, val);
251 return 0;
252 }
253
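/* PHY Polling Unit (PPU) handling: the PPU polls the internal PHYs on the
 * switch's behalf, and while it is running software cannot reliably access
 * the PHY registers itself. The helpers below therefore disable the PPU
 * around a PHY access and re-enable it lazily, via a short timer and a work
 * item, once the access window has been released.
 */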
254 static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps)
255 {
256 int ret;
257 unsigned long timeout;
258
259 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
260 if (ret < 0)
261 return ret;
262
263 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
264 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
265 if (ret)
266 return ret;
267
268 timeout = jiffies + 1 * HZ;
269 while (time_before(jiffies, timeout)) {
270 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
271 if (ret < 0)
272 return ret;
273
274 usleep_range(1000, 2000);
275 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
276 GLOBAL_STATUS_PPU_POLLING)
277 return 0;
278 }
279
280 return -ETIMEDOUT;
281 }
282
283 static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
284 {
285 int ret, err;
286 unsigned long timeout;
287
288 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
289 if (ret < 0)
290 return ret;
291
292 err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
293 ret | GLOBAL_CONTROL_PPU_ENABLE);
294 if (err)
295 return err;
296
297 timeout = jiffies + 1 * HZ;
298 while (time_before(jiffies, timeout)) {
299 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
300 if (ret < 0)
301 return ret;
302
303 usleep_range(1000, 2000);
304 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
305 GLOBAL_STATUS_PPU_POLLING)
306 return 0;
307 }
308
309 return -ETIMEDOUT;
310 }
311
312 static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
313 {
314 struct mv88e6xxx_priv_state *ps;
315
316 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
317 if (mutex_trylock(&ps->ppu_mutex)) {
318 if (mv88e6xxx_ppu_enable(ps) == 0)
319 ps->ppu_disabled = 0;
320 mutex_unlock(&ps->ppu_mutex);
321 }
322 }
323
324 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
325 {
326 struct mv88e6xxx_priv_state *ps = (void *)_ps;
327
328 schedule_work(&ps->ppu_work);
329 }
330
331 static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps)
332 {
333 int ret;
334
335 mutex_lock(&ps->ppu_mutex);
336
337 /* If the PHY polling unit is enabled, disable it so that
338 * we can access the PHY registers. If it was already
339 * disabled, cancel the timer that is going to re-enable
340 * it.
341 */
342 if (!ps->ppu_disabled) {
343 ret = mv88e6xxx_ppu_disable(ps);
344 if (ret < 0) {
345 mutex_unlock(&ps->ppu_mutex);
346 return ret;
347 }
348 ps->ppu_disabled = 1;
349 } else {
350 del_timer(&ps->ppu_timer);
351 ret = 0;
352 }
353
354 return ret;
355 }
356
357 static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps)
358 {
359 /* Schedule a timer to re-enable the PHY polling unit. */
360 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
361 mutex_unlock(&ps->ppu_mutex);
362 }
363
364 void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps)
365 {
366 mutex_init(&ps->ppu_mutex);
367 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
368 setup_timer(&ps->ppu_timer, mv88e6xxx_ppu_reenable_timer,
(unsigned long)ps);
371 }
372
373 static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
374 int regnum)
375 {
376 int ret;
377
378 ret = mv88e6xxx_ppu_access_get(ps);
379 if (ret >= 0) {
380 ret = _mv88e6xxx_reg_read(ps, addr, regnum);
381 mv88e6xxx_ppu_access_put(ps);
382 }
383
384 return ret;
385 }
386
387 static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state *ps, int addr,
388 int regnum, u16 val)
389 {
390 int ret;
391
392 ret = mv88e6xxx_ppu_access_get(ps);
393 if (ret >= 0) {
394 ret = _mv88e6xxx_reg_write(ps, addr, regnum, val);
395 mv88e6xxx_ppu_access_put(ps);
396 }
397
398 return ret;
399 }
400
401 static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps)
402 {
403 return ps->info->family == MV88E6XXX_FAMILY_6065;
404 }
405
406 static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps)
407 {
408 return ps->info->family == MV88E6XXX_FAMILY_6095;
409 }
410
411 static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps)
412 {
413 return ps->info->family == MV88E6XXX_FAMILY_6097;
414 }
415
416 static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps)
417 {
418 return ps->info->family == MV88E6XXX_FAMILY_6165;
419 }
420
421 static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps)
422 {
423 return ps->info->family == MV88E6XXX_FAMILY_6185;
424 }
425
426 static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps)
427 {
428 return ps->info->family == MV88E6XXX_FAMILY_6320;
429 }
430
431 static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps)
432 {
433 return ps->info->family == MV88E6XXX_FAMILY_6351;
434 }
435
436 static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps)
437 {
438 return ps->info->family == MV88E6XXX_FAMILY_6352;
439 }
440
441 static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps)
442 {
443 return ps->info->num_databases;
444 }
445
446 static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps)
447 {
448 /* Does the device have dedicated FID registers for ATU and VTU ops? */
449 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
450 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
451 return true;
452
453 return false;
454 }
455
456 static bool mv88e6xxx_has_stu(struct mv88e6xxx_priv_state *ps)
457 {
458 /* Does the device have STU and dedicated SID registers for VTU ops? */
459 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
460 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
461 return true;
462
463 return false;
464 }
465
466 /* We expect the switch to perform auto-negotiation if there is a real
467 * PHY. However, in the case of a fixed-link PHY, we force the port
468 * settings from the fixed-link settings.
469 */
470 static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
471 struct phy_device *phydev)
472 {
473 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
474 u32 reg;
475 int ret;
476
477 if (!phy_is_pseudo_fixed_link(phydev))
478 return;
479
480 mutex_lock(&ps->smi_mutex);
481
482 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
483 if (ret < 0)
484 goto out;
485
486 reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
487 PORT_PCS_CTRL_FORCE_LINK |
488 PORT_PCS_CTRL_DUPLEX_FULL |
489 PORT_PCS_CTRL_FORCE_DUPLEX |
490 PORT_PCS_CTRL_UNFORCED);
491
492 reg |= PORT_PCS_CTRL_FORCE_LINK;
493 if (phydev->link)
494 reg |= PORT_PCS_CTRL_LINK_UP;
495
496 if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100)
497 goto out;
498
499 switch (phydev->speed) {
500 case SPEED_1000:
501 reg |= PORT_PCS_CTRL_1000;
502 break;
503 case SPEED_100:
504 reg |= PORT_PCS_CTRL_100;
505 break;
506 case SPEED_10:
507 reg |= PORT_PCS_CTRL_10;
508 break;
509 default:
510 pr_info("Unknown speed\n");
511 goto out;
512 }
513
514 reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
515 if (phydev->duplex == DUPLEX_FULL)
516 reg |= PORT_PCS_CTRL_DUPLEX_FULL;
517
518 if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) &&
519 (port >= ps->info->num_ports - 2)) {
520 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
521 reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
522 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
523 reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
524 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
525 reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
526 PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
527 }
528 _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg);
529
530 out:
531 mutex_unlock(&ps->smi_mutex);
532 }
533
534 static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps)
535 {
536 int ret;
537 int i;
538
539 for (i = 0; i < 10; i++) {
540 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP);
if (ret < 0)
return ret;
541 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
542 return 0;
543 }
544
545 return -ETIMEDOUT;
546 }
547
548 static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps,
549 int port)
550 {
551 int ret;
552
553 if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
554 port = (port + 1) << 5;
555
556 /* Snapshot the hardware statistics counters for this port. */
557 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
558 GLOBAL_STATS_OP_CAPTURE_PORT |
559 GLOBAL_STATS_OP_HIST_RX_TX | port);
560 if (ret < 0)
561 return ret;
562
563 /* Wait for the snapshotting to complete. */
564 ret = _mv88e6xxx_stats_wait(ps);
565 if (ret < 0)
566 return ret;
567
568 return 0;
569 }
570
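/* Read one captured statistics counter. The 32-bit value is returned in two
 * 16-bit halves: GLOBAL_STATS_COUNTER_32 holds bits 31:16 and
 * GLOBAL_STATS_COUNTER_01 holds bits 15:0. Any register access error leaves
 * the counter reported as 0.
 */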
571 static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps,
572 int stat, u32 *val)
573 {
574 u32 _val;
575 int ret;
576
577 *val = 0;
578
579 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
580 GLOBAL_STATS_OP_READ_CAPTURED |
581 GLOBAL_STATS_OP_HIST_RX_TX | stat);
582 if (ret < 0)
583 return;
584
585 ret = _mv88e6xxx_stats_wait(ps);
586 if (ret < 0)
587 return;
588
589 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
590 if (ret < 0)
591 return;
592
593 _val = ret << 16;
594
595 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
596 if (ret < 0)
597 return;
598
599 *val = _val | ret;
600 }
601
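/* Hardware statistics table: { name, size in bytes, counter register, bank }.
 * BANK0 and BANK1 counters are captured and read through the global stats
 * unit, while PORT counters are read directly from the per-port registers;
 * mv88e6xxx_has_stat() decides which banks a given family implements.
 */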
602 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
603 { "in_good_octets", 8, 0x00, BANK0, },
604 { "in_bad_octets", 4, 0x02, BANK0, },
605 { "in_unicast", 4, 0x04, BANK0, },
606 { "in_broadcasts", 4, 0x06, BANK0, },
607 { "in_multicasts", 4, 0x07, BANK0, },
608 { "in_pause", 4, 0x16, BANK0, },
609 { "in_undersize", 4, 0x18, BANK0, },
610 { "in_fragments", 4, 0x19, BANK0, },
611 { "in_oversize", 4, 0x1a, BANK0, },
612 { "in_jabber", 4, 0x1b, BANK0, },
613 { "in_rx_error", 4, 0x1c, BANK0, },
614 { "in_fcs_error", 4, 0x1d, BANK0, },
615 { "out_octets", 8, 0x0e, BANK0, },
616 { "out_unicast", 4, 0x10, BANK0, },
617 { "out_broadcasts", 4, 0x13, BANK0, },
618 { "out_multicasts", 4, 0x12, BANK0, },
619 { "out_pause", 4, 0x15, BANK0, },
620 { "excessive", 4, 0x11, BANK0, },
621 { "collisions", 4, 0x1e, BANK0, },
622 { "deferred", 4, 0x05, BANK0, },
623 { "single", 4, 0x14, BANK0, },
624 { "multiple", 4, 0x17, BANK0, },
625 { "out_fcs_error", 4, 0x03, BANK0, },
626 { "late", 4, 0x1f, BANK0, },
627 { "hist_64bytes", 4, 0x08, BANK0, },
628 { "hist_65_127bytes", 4, 0x09, BANK0, },
629 { "hist_128_255bytes", 4, 0x0a, BANK0, },
630 { "hist_256_511bytes", 4, 0x0b, BANK0, },
631 { "hist_512_1023bytes", 4, 0x0c, BANK0, },
632 { "hist_1024_max_bytes", 4, 0x0d, BANK0, },
633 { "sw_in_discards", 4, 0x10, PORT, },
634 { "sw_in_filtered", 2, 0x12, PORT, },
635 { "sw_out_filtered", 2, 0x13, PORT, },
636 { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, },
637 { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, },
638 { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, },
639 { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, },
640 { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, },
641 { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, },
642 { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, },
643 { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, },
644 { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, },
645 { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, },
646 { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, },
647 { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, },
648 { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, },
649 { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, },
650 { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, },
651 { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, },
652 { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, },
653 { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, },
654 { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, },
655 { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, },
656 { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, },
657 { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, },
658 { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, },
659 { "out_octets_a", 4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, },
660 { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, },
661 { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
662 };
663
664 static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps,
665 struct mv88e6xxx_hw_stat *stat)
666 {
667 switch (stat->type) {
668 case BANK0:
669 return true;
670 case BANK1:
671 return mv88e6xxx_6320_family(ps);
672 case PORT:
673 return mv88e6xxx_6095_family(ps) ||
674 mv88e6xxx_6185_family(ps) ||
675 mv88e6xxx_6097_family(ps) ||
676 mv88e6xxx_6165_family(ps) ||
677 mv88e6xxx_6351_family(ps) ||
678 mv88e6xxx_6352_family(ps);
679 }
680 return false;
681 }
682
683 static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
684 struct mv88e6xxx_hw_stat *s,
685 int port)
686 {
687 u32 low;
688 u32 high = 0;
689 int ret;
690 u64 value;
691
692 switch (s->type) {
693 case PORT:
694 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg);
695 if (ret < 0)
696 return UINT64_MAX;
697
698 low = ret;
699 if (s->sizeof_stat == 4) {
700 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port),
701 s->reg + 1);
702 if (ret < 0)
703 return UINT64_MAX;
704 high = ret;
705 }
706 break;
707 case BANK0:
708 case BANK1:
709 _mv88e6xxx_stats_read(ps, s->reg, &low);
710 if (s->sizeof_stat == 8)
711 _mv88e6xxx_stats_read(ps, s->reg + 1, &high);
712 }
713 value = (((u64)high) << 16) | low;
714 return value;
715 }
716
717 static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
718 uint8_t *data)
719 {
720 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
721 struct mv88e6xxx_hw_stat *stat;
722 int i, j;
723
724 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
725 stat = &mv88e6xxx_hw_stats[i];
726 if (mv88e6xxx_has_stat(ps, stat)) {
727 memcpy(data + j * ETH_GSTRING_LEN, stat->string,
728 ETH_GSTRING_LEN);
729 j++;
730 }
731 }
732 }
733
734 static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
735 {
736 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
737 struct mv88e6xxx_hw_stat *stat;
738 int i, j;
739
740 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
741 stat = &mv88e6xxx_hw_stats[i];
742 if (mv88e6xxx_has_stat(ps, stat))
743 j++;
744 }
745 return j;
746 }
747
748 static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
749 uint64_t *data)
750 {
751 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
752 struct mv88e6xxx_hw_stat *stat;
753 int ret;
754 int i, j;
755
756 mutex_lock(&ps->smi_mutex);
757
758 ret = _mv88e6xxx_stats_snapshot(ps, port);
759 if (ret < 0) {
760 mutex_unlock(&ps->smi_mutex);
761 return;
762 }
763 for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
764 stat = &mv88e6xxx_hw_stats[i];
765 if (mv88e6xxx_has_stat(ps, stat)) {
766 data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port);
767 j++;
768 }
769 }
770
771 mutex_unlock(&ps->smi_mutex);
772 }
773
774 static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
775 {
776 return 32 * sizeof(u16);
777 }
778
779 static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
780 struct ethtool_regs *regs, void *_p)
781 {
782 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
783 u16 *p = _p;
784 int i;
785
786 regs->version = 0;
787
788 memset(p, 0xff, 32 * sizeof(u16));
789
790 mutex_lock(&ps->smi_mutex);
791
792 for (i = 0; i < 32; i++) {
793 int ret;
794
795 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), i);
796 if (ret >= 0)
797 p[i] = ret;
798 }
799
800 mutex_unlock(&ps->smi_mutex);
801 }
802
803 static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
804 u16 mask)
805 {
806 unsigned long timeout = jiffies + HZ / 10;
807
808 while (time_before(jiffies, timeout)) {
809 int ret;
810
811 ret = _mv88e6xxx_reg_read(ps, reg, offset);
812 if (ret < 0)
813 return ret;
814 if (!(ret & mask))
815 return 0;
816
817 usleep_range(1000, 2000);
818 }
819 return -ETIMEDOUT;
820 }
821
822 static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg,
823 int offset, u16 mask)
824 {
825 int ret;
826
827 mutex_lock(&ps->smi_mutex);
828 ret = _mv88e6xxx_wait(ps, reg, offset, mask);
829 mutex_unlock(&ps->smi_mutex);
830
831 return ret;
832 }
833
834 static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps)
835 {
836 return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
837 GLOBAL2_SMI_OP_BUSY);
838 }
839
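/* EEPROM words are accessed indirectly through GLOBAL2_EEPROM_OP (command
 * plus word address) and GLOBAL2_EEPROM_DATA, serialized by eeprom_mutex.
 * The byte-oriented ethtool requests below are split into 16-bit word
 * accesses, with a read-modify-write for an unaligned first or last byte.
 */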
840 static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
841 {
842 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
843
844 return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
845 GLOBAL2_EEPROM_OP_LOAD);
846 }
847
848 static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
849 {
850 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
851
852 return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
853 GLOBAL2_EEPROM_OP_BUSY);
854 }
855
856 static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
857 {
858 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
859 int ret;
860
861 mutex_lock(&ps->eeprom_mutex);
862
863 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
864 GLOBAL2_EEPROM_OP_READ |
865 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
866 if (ret < 0)
867 goto error;
868
869 ret = mv88e6xxx_eeprom_busy_wait(ds);
870 if (ret < 0)
871 goto error;
872
873 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
874 error:
875 mutex_unlock(&ps->eeprom_mutex);
876 return ret;
877 }
878
879 static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
880 struct ethtool_eeprom *eeprom, u8 *data)
881 {
882 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
883 int offset;
884 int len;
885 int ret;
886
887 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
888 return -EOPNOTSUPP;
889
890 offset = eeprom->offset;
891 len = eeprom->len;
892 eeprom->len = 0;
893
894 eeprom->magic = 0xc3ec4951;
895
896 ret = mv88e6xxx_eeprom_load_wait(ds);
897 if (ret < 0)
898 return ret;
899
900 if (offset & 1) {
901 int word;
902
903 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
904 if (word < 0)
905 return word;
906
907 *data++ = (word >> 8) & 0xff;
908
909 offset++;
910 len--;
911 eeprom->len++;
912 }
913
914 while (len >= 2) {
915 int word;
916
917 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
918 if (word < 0)
919 return word;
920
921 *data++ = word & 0xff;
922 *data++ = (word >> 8) & 0xff;
923
924 offset += 2;
925 len -= 2;
926 eeprom->len += 2;
927 }
928
929 if (len) {
930 int word;
931
932 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
933 if (word < 0)
934 return word;
935
936 *data++ = word & 0xff;
937
938 offset++;
939 len--;
940 eeprom->len++;
941 }
942
943 return 0;
944 }
945
946 static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
947 {
948 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
949 int ret;
950
951 ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
952 if (ret < 0)
953 return ret;
954
955 if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
956 return -EROFS;
957
958 return 0;
959 }
960
961 static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
962 u16 data)
963 {
964 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
965 int ret;
966
967 mutex_lock(&ps->eeprom_mutex);
968
969 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
970 if (ret < 0)
971 goto error;
972
973 ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
974 GLOBAL2_EEPROM_OP_WRITE |
975 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
976 if (ret < 0)
977 goto error;
978
979 ret = mv88e6xxx_eeprom_busy_wait(ds);
980 error:
981 mutex_unlock(&ps->eeprom_mutex);
982 return ret;
983 }
984
985 static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
986 struct ethtool_eeprom *eeprom, u8 *data)
987 {
988 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
989 int offset;
990 int ret;
991 int len;
992
993 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
994 return -EOPNOTSUPP;
995
996 if (eeprom->magic != 0xc3ec4951)
997 return -EINVAL;
998
999 ret = mv88e6xxx_eeprom_is_readonly(ds);
1000 if (ret)
1001 return ret;
1002
1003 offset = eeprom->offset;
1004 len = eeprom->len;
1005 eeprom->len = 0;
1006
1007 ret = mv88e6xxx_eeprom_load_wait(ds);
1008 if (ret < 0)
1009 return ret;
1010
1011 if (offset & 1) {
1012 int word;
1013
1014 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
1015 if (word < 0)
1016 return word;
1017
1018 word = (*data++ << 8) | (word & 0xff);
1019
1020 ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
1021 if (ret < 0)
1022 return ret;
1023
1024 offset++;
1025 len--;
1026 eeprom->len++;
1027 }
1028
1029 while (len >= 2) {
1030 int word;
1031
1032 word = *data++;
1033 word |= *data++ << 8;
1034
1035 ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
1036 if (ret < 0)
1037 return ret;
1038
1039 offset += 2;
1040 len -= 2;
1041 eeprom->len += 2;
1042 }
1043
1044 if (len) {
1045 int word;
1046
1047 word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
1048 if (word < 0)
1049 return word;
1050
1051 word = (word & 0xff00) | *data++;
1052
1053 ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
1054 if (ret < 0)
1055 return ret;
1056
1057 offset++;
1058 len--;
1059 eeprom->len++;
1060 }
1061
1062 return 0;
1063 }
1064
1065 static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps)
1066 {
1067 return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP,
1068 GLOBAL_ATU_OP_BUSY);
1069 }
1070
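/* Indirect PHY access mediated by the Global 2 SMI registers: the PHY
 * address (bits 9:5) and register (bits 4:0) are encoded into
 * GLOBAL2_SMI_OP together with the clause 22 opcode, and the 16-bit value
 * is transferred through GLOBAL2_SMI_DATA once GLOBAL2_SMI_OP_BUSY clears.
 */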
1071 static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps,
1072 int addr, int regnum)
1073 {
1074 int ret;
1075
1076 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
1077 GLOBAL2_SMI_OP_22_READ | (addr << 5) |
1078 regnum);
1079 if (ret < 0)
1080 return ret;
1081
1082 ret = _mv88e6xxx_phy_wait(ps);
1083 if (ret < 0)
1084 return ret;
1085
1086 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA);
1087
1088 return ret;
1089 }
1090
1091 static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps,
1092 int addr, int regnum, u16 val)
1093 {
1094 int ret;
1095
1096 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
1097 if (ret < 0)
1098 return ret;
1099
1100 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
1101 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
1102 regnum);
if (ret < 0)
return ret;
1103
1104 return _mv88e6xxx_phy_wait(ps);
1105 }
1106
1107 static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
1108 struct ethtool_eee *e)
1109 {
1110 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1111 int reg;
1112
1113 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
1114 return -EOPNOTSUPP;
1115
1116 mutex_lock(&ps->smi_mutex);
1117
1118 reg = _mv88e6xxx_phy_read_indirect(ps, port, 16);
1119 if (reg < 0)
1120 goto out;
1121
1122 e->eee_enabled = !!(reg & 0x0200);
1123 e->tx_lpi_enabled = !!(reg & 0x0100);
1124
1125 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
1126 if (reg < 0)
1127 goto out;
1128
1129 e->eee_active = !!(reg & PORT_STATUS_EEE);
1130 reg = 0;
1131
1132 out:
1133 mutex_unlock(&ps->smi_mutex);
1134 return reg;
1135 }
1136
1137 static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
1138 struct phy_device *phydev, struct ethtool_eee *e)
1139 {
1140 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1141 int reg;
1142 int ret;
1143
1144 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
1145 return -EOPNOTSUPP;
1146
1147 mutex_lock(&ps->smi_mutex);
1148
1149 ret = _mv88e6xxx_phy_read_indirect(ps, port, 16);
1150 if (ret < 0)
1151 goto out;
1152
1153 reg = ret & ~0x0300;
1154 if (e->eee_enabled)
1155 reg |= 0x0200;
1156 if (e->tx_lpi_enabled)
1157 reg |= 0x0100;
1158
1159 ret = _mv88e6xxx_phy_write_indirect(ps, port, 16, reg);
1160 out:
1161 mutex_unlock(&ps->smi_mutex);
1162
1163 return ret;
1164 }
1165
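/* Program the target address database (FID) and issue an ATU operation.
 * Worked example with a hypothetical fid of 0x5a on a 256-database device
 * without a dedicated FID register: DBNum[7:4] (0x5) is written into ATU
 * Control bits 15:12 and DBNum[3:0] (0xa) is OR'd into the operation word,
 * exactly as done below.
 */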
1166 static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd)
1167 {
1168 int ret;
1169
1170 if (mv88e6xxx_has_fid_reg(ps)) {
1171 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid);
1172 if (ret < 0)
1173 return ret;
1174 } else if (mv88e6xxx_num_databases(ps) == 256) {
1175 /* ATU DBNum[7:4] are located in ATU Control 15:12 */
1176 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL);
1177 if (ret < 0)
1178 return ret;
1179
1180 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
1181 (ret & 0xfff) |
1182 ((fid << 8) & 0xf000));
1183 if (ret < 0)
1184 return ret;
1185
1186 /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
1187 cmd |= fid & 0xf;
1188 }
1189
1190 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
1191 if (ret < 0)
1192 return ret;
1193
1194 return _mv88e6xxx_atu_wait(ps);
1195 }
1196
1197 static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps,
1198 struct mv88e6xxx_atu_entry *entry)
1199 {
1200 u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
1201
1202 if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1203 unsigned int mask, shift;
1204
1205 if (entry->trunk) {
1206 data |= GLOBAL_ATU_DATA_TRUNK;
1207 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1208 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1209 } else {
1210 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1211 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1212 }
1213
1214 data |= (entry->portv_trunkid << shift) & mask;
1215 }
1216
1217 return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data);
1218 }
1219
1220 static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps,
1221 struct mv88e6xxx_atu_entry *entry,
1222 bool static_too)
1223 {
1224 int op;
1225 int err;
1226
1227 err = _mv88e6xxx_atu_wait(ps);
1228 if (err)
1229 return err;
1230
1231 err = _mv88e6xxx_atu_data_write(ps, entry);
1232 if (err)
1233 return err;
1234
1235 if (entry->fid) {
1236 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
1237 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
1238 } else {
1239 op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
1240 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
1241 }
1242
1243 return _mv88e6xxx_atu_cmd(ps, entry->fid, op);
1244 }
1245
1246 static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps,
1247 u16 fid, bool static_too)
1248 {
1249 struct mv88e6xxx_atu_entry entry = {
1250 .fid = fid,
1251 .state = 0, /* EntryState bits must be 0 */
1252 };
1253
1254 return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
1255 }
1256
1257 static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid,
1258 int from_port, int to_port, bool static_too)
1259 {
1260 struct mv88e6xxx_atu_entry entry = {
1261 .trunk = false,
1262 .fid = fid,
1263 };
1264
1265 /* EntryState bits must be 0xF */
1266 entry.state = GLOBAL_ATU_DATA_STATE_MASK;
1267
1268 /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1269 entry.portv_trunkid = (to_port & 0x0f) << 4;
1270 entry.portv_trunkid |= from_port & 0x0f;
1271
1272 return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
1273 }
1274
1275 static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state *ps, u16 fid,
1276 int port, bool static_too)
1277 {
1278 /* Destination port 0xF means remove the entries */
1279 return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too);
1280 }
1281
1282 static const char * const mv88e6xxx_port_state_names[] = {
1283 [PORT_CONTROL_STATE_DISABLED] = "Disabled",
1284 [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
1285 [PORT_CONTROL_STATE_LEARNING] = "Learning",
1286 [PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
1287 };
1288
1289 static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
1290 u8 state)
1291 {
1292 struct dsa_switch *ds = ps->ds;
1293 int reg, ret = 0;
1294 u8 oldstate;
1295
1296 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL);
1297 if (reg < 0)
1298 return reg;
1299
1300 oldstate = reg & PORT_CONTROL_STATE_MASK;
1301
1302 if (oldstate != state) {
1303 /* Flush forwarding database if we're moving a port
1304 * from Learning or Forwarding state to Disabled or
1305 * Blocking or Listening state.
1306 */
1307 if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
1308 oldstate == PORT_CONTROL_STATE_FORWARDING)
1309 && (state == PORT_CONTROL_STATE_DISABLED ||
1310 state == PORT_CONTROL_STATE_BLOCKING)) {
1311 ret = _mv88e6xxx_atu_remove(ps, 0, port, false);
1312 if (ret)
1313 return ret;
1314 }
1315
1316 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1317 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL,
1318 reg);
1319 if (ret)
1320 return ret;
1321
1322 netdev_dbg(ds->ports[port], "PortState %s (was %s)\n",
1323 mv88e6xxx_port_state_names[state],
1324 mv88e6xxx_port_state_names[oldstate]);
1325 }
1326
1327 return ret;
1328 }
1329
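/* Compute the port-based VLAN map (PortVec) for a port: a bridged user port
 * may send to the other members of its bridge group plus the CPU/DSA ports,
 * but never back to itself. Hypothetical example with the CPU on port 0 and
 * ports 1 and 2 bridged together: port 1 ends up with
 * output_ports = BIT(0) | BIT(2).
 */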
1330 static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
1331 int port)
1332 {
1333 struct net_device *bridge = ps->ports[port].bridge_dev;
1334 const u16 mask = (1 << ps->info->num_ports) - 1;
1335 struct dsa_switch *ds = ps->ds;
1336 u16 output_ports = 0;
1337 int reg;
1338 int i;
1339
1340 /* allow CPU port or DSA link(s) to send frames to every port */
1341 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
1342 output_ports = mask;
1343 } else {
1344 for (i = 0; i < ps->info->num_ports; ++i) {
1345 /* allow sending frames to every group member */
1346 if (bridge && ps->ports[i].bridge_dev == bridge)
1347 output_ports |= BIT(i);
1348
1349 /* allow sending frames to CPU port and DSA link(s) */
1350 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
1351 output_ports |= BIT(i);
1352 }
1353 }
1354
1355 /* prevent frames from going back out of the port they came in on */
1356 output_ports &= ~BIT(port);
1357
1358 reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
1359 if (reg < 0)
1360 return reg;
1361
1362 reg &= ~mask;
1363 reg |= output_ports & mask;
1364
1365 return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg);
1366 }
1367
1368 static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
1369 u8 state)
1370 {
1371 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1372 int stp_state;
1373
1374 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_PORTSTATE))
1375 return;
1376
1377 switch (state) {
1378 case BR_STATE_DISABLED:
1379 stp_state = PORT_CONTROL_STATE_DISABLED;
1380 break;
1381 case BR_STATE_BLOCKING:
1382 case BR_STATE_LISTENING:
1383 stp_state = PORT_CONTROL_STATE_BLOCKING;
1384 break;
1385 case BR_STATE_LEARNING:
1386 stp_state = PORT_CONTROL_STATE_LEARNING;
1387 break;
1388 case BR_STATE_FORWARDING:
1389 default:
1390 stp_state = PORT_CONTROL_STATE_FORWARDING;
1391 break;
1392 }
1393
1394 /* mv88e6xxx_port_stp_state_set may be called with softirqs disabled,
1395 * so we cannot update the port state directly and must schedule it.
1396 */
1397 ps->ports[port].state = stp_state;
1398 set_bit(port, ps->port_state_update_mask);
1399 schedule_work(&ps->bridge_work);
1400 }
1401
1402 static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
1403 u16 *new, u16 *old)
1404 {
1405 struct dsa_switch *ds = ps->ds;
1406 u16 pvid;
1407 int ret;
1408
1409 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN);
1410 if (ret < 0)
1411 return ret;
1412
1413 pvid = ret & PORT_DEFAULT_VLAN_MASK;
1414
1415 if (new) {
1416 ret &= ~PORT_DEFAULT_VLAN_MASK;
1417 ret |= *new & PORT_DEFAULT_VLAN_MASK;
1418
1419 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
1420 PORT_DEFAULT_VLAN, ret);
1421 if (ret < 0)
1422 return ret;
1423
1424 netdev_dbg(ds->ports[port], "DefaultVID %d (was %d)\n", *new,
1425 pvid);
1426 }
1427
1428 if (old)
1429 *old = pvid;
1430
1431 return 0;
1432 }
1433
1434 static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps,
1435 int port, u16 *pvid)
1436 {
1437 return _mv88e6xxx_port_pvid(ps, port, NULL, pvid);
1438 }
1439
1440 static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps,
1441 int port, u16 pvid)
1442 {
1443 return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL);
1444 }
1445
1446 static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps)
1447 {
1448 return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP,
1449 GLOBAL_VTU_OP_BUSY);
1450 }
1451
1452 static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op)
1453 {
1454 int ret;
1455
1456 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op);
1457 if (ret < 0)
1458 return ret;
1459
1460 return _mv88e6xxx_vtu_wait(ps);
1461 }
1462
1463 static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps)
1464 {
1465 int ret;
1466
1467 ret = _mv88e6xxx_vtu_wait(ps);
1468 if (ret < 0)
1469 return ret;
1470
1471 return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL);
1472 }
1473
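/* The VTU/STU data registers pack one 4-bit nibble per port, four ports per
 * 16-bit register; port 5, for instance, uses bits 7:4 of
 * GLOBAL_VTU_DATA_0_3 + 1. The VTU member tag occupies the low half of each
 * nibble (nibble_offset 0) and the STU port state the high half
 * (nibble_offset 2), hence the two nibble_offset values used by the callers.
 */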
1474 static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
1475 struct mv88e6xxx_vtu_stu_entry *entry,
1476 unsigned int nibble_offset)
1477 {
1478 u16 regs[3];
1479 int i;
1480 int ret;
1481
1482 for (i = 0; i < 3; ++i) {
1483 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1484 GLOBAL_VTU_DATA_0_3 + i);
1485 if (ret < 0)
1486 return ret;
1487
1488 regs[i] = ret;
1489 }
1490
1491 for (i = 0; i < ps->info->num_ports; ++i) {
1492 unsigned int shift = (i % 4) * 4 + nibble_offset;
1493 u16 reg = regs[i / 4];
1494
1495 entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
1496 }
1497
1498 return 0;
1499 }
1500
1501 static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
1502 struct mv88e6xxx_vtu_stu_entry *entry,
1503 unsigned int nibble_offset)
1504 {
1505 u16 regs[3] = { 0 };
1506 int i;
1507 int ret;
1508
1509 for (i = 0; i < ps->info->num_ports; ++i) {
1510 unsigned int shift = (i % 4) * 4 + nibble_offset;
1511 u8 data = entry->data[i];
1512
1513 regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
1514 }
1515
1516 for (i = 0; i < 3; ++i) {
1517 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL,
1518 GLOBAL_VTU_DATA_0_3 + i, regs[i]);
1519 if (ret < 0)
1520 return ret;
1521 }
1522
1523 return 0;
1524 }
1525
1526 static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid)
1527 {
1528 return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID,
1529 vid & GLOBAL_VTU_VID_MASK);
1530 }
1531
1532 static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
1533 struct mv88e6xxx_vtu_stu_entry *entry)
1534 {
1535 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1536 int ret;
1537
1538 ret = _mv88e6xxx_vtu_wait(ps);
1539 if (ret < 0)
1540 return ret;
1541
1542 ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT);
1543 if (ret < 0)
1544 return ret;
1545
1546 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
1547 if (ret < 0)
1548 return ret;
1549
1550 next.vid = ret & GLOBAL_VTU_VID_MASK;
1551 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1552
1553 if (next.valid) {
1554 ret = _mv88e6xxx_vtu_stu_data_read(ps, &next, 0);
1555 if (ret < 0)
1556 return ret;
1557
1558 if (mv88e6xxx_has_fid_reg(ps)) {
1559 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1560 GLOBAL_VTU_FID);
1561 if (ret < 0)
1562 return ret;
1563
1564 next.fid = ret & GLOBAL_VTU_FID_MASK;
1565 } else if (mv88e6xxx_num_databases(ps) == 256) {
1566 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1567 * VTU DBNum[3:0] are located in VTU Operation 3:0
1568 */
1569 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1570 GLOBAL_VTU_OP);
1571 if (ret < 0)
1572 return ret;
1573
1574 next.fid = (ret & 0xf00) >> 4;
1575 next.fid |= ret & 0xf;
1576 }
1577
1578 if (mv88e6xxx_has_stu(ps)) {
1579 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
1580 GLOBAL_VTU_SID);
1581 if (ret < 0)
1582 return ret;
1583
1584 next.sid = ret & GLOBAL_VTU_SID_MASK;
1585 }
1586 }
1587
1588 *entry = next;
1589 return 0;
1590 }
1591
1592 static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
1593 struct switchdev_obj_port_vlan *vlan,
1594 int (*cb)(struct switchdev_obj *obj))
1595 {
1596 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1597 struct mv88e6xxx_vtu_stu_entry next;
1598 u16 pvid;
1599 int err;
1600
1601 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
1602 return -EOPNOTSUPP;
1603
1604 mutex_lock(&ps->smi_mutex);
1605
1606 err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
1607 if (err)
1608 goto unlock;
1609
1610 err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
1611 if (err)
1612 goto unlock;
1613
1614 do {
1615 err = _mv88e6xxx_vtu_getnext(ps, &next);
1616 if (err)
1617 break;
1618
1619 if (!next.valid)
1620 break;
1621
1622 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1623 continue;
1624
1625 /* reinit and dump this VLAN obj */
1626 vlan->vid_begin = vlan->vid_end = next.vid;
1627 vlan->flags = 0;
1628
1629 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1630 vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1631
1632 if (next.vid == pvid)
1633 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1634
1635 err = cb(&vlan->obj);
1636 if (err)
1637 break;
1638 } while (next.vid < GLOBAL_VTU_VID_MASK);
1639
1640 unlock:
1641 mutex_unlock(&ps->smi_mutex);
1642
1643 return err;
1644 }
1645
1646 static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
1647 struct mv88e6xxx_vtu_stu_entry *entry)
1648 {
1649 u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
1650 u16 reg = 0;
1651 int ret;
1652
1653 ret = _mv88e6xxx_vtu_wait(ps);
1654 if (ret < 0)
1655 return ret;
1656
1657 if (!entry->valid)
1658 goto loadpurge;
1659
1660 /* Write port member tags */
1661 ret = _mv88e6xxx_vtu_stu_data_write(ps, entry, 0);
1662 if (ret < 0)
1663 return ret;
1664
1665 if (mv88e6xxx_has_stu(ps)) {
1666 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1667 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1668 if (ret < 0)
1669 return ret;
1670 }
1671
1672 if (mv88e6xxx_has_fid_reg(ps)) {
1673 reg = entry->fid & GLOBAL_VTU_FID_MASK;
1674 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg);
1675 if (ret < 0)
1676 return ret;
1677 } else if (mv88e6xxx_num_databases(ps) == 256) {
1678 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1679 * VTU DBNum[3:0] are located in VTU Operation 3:0
1680 */
1681 op |= (entry->fid & 0xf0) << 8;
1682 op |= entry->fid & 0xf;
1683 }
1684
1685 reg = GLOBAL_VTU_VID_VALID;
1686 loadpurge:
1687 reg |= entry->vid & GLOBAL_VTU_VID_MASK;
1688 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1689 if (ret < 0)
1690 return ret;
1691
1692 return _mv88e6xxx_vtu_cmd(ps, op);
1693 }
1694
1695 static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid,
1696 struct mv88e6xxx_vtu_stu_entry *entry)
1697 {
1698 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1699 int ret;
1700
1701 ret = _mv88e6xxx_vtu_wait(ps);
1702 if (ret < 0)
1703 return ret;
1704
1705 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID,
1706 sid & GLOBAL_VTU_SID_MASK);
1707 if (ret < 0)
1708 return ret;
1709
1710 ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT);
1711 if (ret < 0)
1712 return ret;
1713
1714 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID);
1715 if (ret < 0)
1716 return ret;
1717
1718 next.sid = ret & GLOBAL_VTU_SID_MASK;
1719
1720 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
1721 if (ret < 0)
1722 return ret;
1723
1724 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1725
1726 if (next.valid) {
1727 ret = _mv88e6xxx_vtu_stu_data_read(ps, &next, 2);
1728 if (ret < 0)
1729 return ret;
1730 }
1731
1732 *entry = next;
1733 return 0;
1734 }
1735
1736 static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps,
1737 struct mv88e6xxx_vtu_stu_entry *entry)
1738 {
1739 u16 reg = 0;
1740 int ret;
1741
1742 ret = _mv88e6xxx_vtu_wait(ps);
1743 if (ret < 0)
1744 return ret;
1745
1746 if (!entry->valid)
1747 goto loadpurge;
1748
1749 /* Write port states */
1750 ret = _mv88e6xxx_vtu_stu_data_write(ps, entry, 2);
1751 if (ret < 0)
1752 return ret;
1753
1754 reg = GLOBAL_VTU_VID_VALID;
1755 loadpurge:
1756 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1757 if (ret < 0)
1758 return ret;
1759
1760 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1761 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1762 if (ret < 0)
1763 return ret;
1764
1765 return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE);
1766 }
1767
1768 static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
1769 u16 *new, u16 *old)
1770 {
1771 struct dsa_switch *ds = ps->ds;
1772 u16 upper_mask;
1773 u16 fid;
1774 int ret;
1775
1776 if (mv88e6xxx_num_databases(ps) == 4096)
1777 upper_mask = 0xff;
1778 else if (mv88e6xxx_num_databases(ps) == 256)
1779 upper_mask = 0xf;
1780 else
1781 return -EOPNOTSUPP;
1782
1783 /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
1784 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
1785 if (ret < 0)
1786 return ret;
1787
1788 fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
1789
1790 if (new) {
1791 ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
1792 ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
1793
1794 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN,
1795 ret);
1796 if (ret < 0)
1797 return ret;
1798 }
1799
1800 /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
1801 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1);
1802 if (ret < 0)
1803 return ret;
1804
1805 fid |= (ret & upper_mask) << 4;
1806
1807 if (new) {
1808 ret &= ~upper_mask;
1809 ret |= (*new >> 4) & upper_mask;
1810
1811 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1,
1812 ret);
1813 if (ret < 0)
1814 return ret;
1815
1816 netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid);
1817 }
1818
1819 if (old)
1820 *old = fid;
1821
1822 return 0;
1823 }
1824
1825 static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps,
1826 int port, u16 *fid)
1827 {
1828 return _mv88e6xxx_port_fid(ps, port, NULL, fid);
1829 }
1830
1831 static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps,
1832 int port, u16 fid)
1833 {
1834 return _mv88e6xxx_port_fid(ps, port, &fid, NULL);
1835 }
1836
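/* Allocate a fresh FID (address database number): mark every FID already in
 * use by a port's default FID or by a VTU entry in a bitmap, pick the lowest
 * free non-zero value, and flush any stale ATU entries it might contain.
 */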
1837 static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
1838 {
1839 DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
1840 struct mv88e6xxx_vtu_stu_entry vlan;
1841 int i, err;
1842
1843 bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
1844
1845 /* Set every FID bit used by the (un)bridged ports */
1846 for (i = 0; i < ps->info->num_ports; ++i) {
1847 err = _mv88e6xxx_port_fid_get(ps, i, fid);
1848 if (err)
1849 return err;
1850
1851 set_bit(*fid, fid_bitmap);
1852 }
1853
1854 /* Set every FID bit used by the VLAN entries */
1855 err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
1856 if (err)
1857 return err;
1858
1859 do {
1860 err = _mv88e6xxx_vtu_getnext(ps, &vlan);
1861 if (err)
1862 return err;
1863
1864 if (!vlan.valid)
1865 break;
1866
1867 set_bit(vlan.fid, fid_bitmap);
1868 } while (vlan.vid < GLOBAL_VTU_VID_MASK);
1869
1870 /* The reset value 0x000 is used to indicate that multiple address
1871 * databases are not needed. Return the next available positive FID.
1872 */
1873 *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
1874 if (unlikely(*fid >= mv88e6xxx_num_databases(ps)))
1875 return -ENOSPC;
1876
1877 /* Clear the database */
1878 return _mv88e6xxx_atu_flush(ps, *fid, true);
1879 }
1880
1881 static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
1882 struct mv88e6xxx_vtu_stu_entry *entry)
1883 {
1884 struct dsa_switch *ds = ps->ds;
1885 struct mv88e6xxx_vtu_stu_entry vlan = {
1886 .valid = true,
1887 .vid = vid,
1888 };
1889 int i, err;
1890
1891 err = _mv88e6xxx_fid_new(ps, &vlan.fid);
1892 if (err)
1893 return err;
1894
1895 /* exclude all ports except the CPU and DSA ports */
1896 for (i = 0; i < ps->info->num_ports; ++i)
1897 vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
1898 ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
1899 : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1900
1901 if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
1902 mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) {
1903 struct mv88e6xxx_vtu_stu_entry vstp;
1904
1905 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1906 * implemented, only one STU entry is needed to cover all VTU
1907 * entries. Thus, make sure that SID 0 is valid.
1908 */
1909 vlan.sid = 0;
1910 err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp);
1911 if (err)
1912 return err;
1913
1914 if (vstp.sid != vlan.sid || !vstp.valid) {
1915 memset(&vstp, 0, sizeof(vstp));
1916 vstp.valid = true;
1917 vstp.sid = vlan.sid;
1918
1919 err = _mv88e6xxx_stu_loadpurge(ps, &vstp);
1920 if (err)
1921 return err;
1922 }
1923 }
1924
1925 *entry = vlan;
1926 return 0;
1927 }
1928
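/* Look up, and optionally create, the VTU entry for a given VID. Since the
 * VTU GetNext operation returns the first entry above the VID last written
 * to GLOBAL_VTU_VID, writing vid - 1 and fetching the next entry either
 * lands exactly on vid or proves that it is absent.
 */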
1929 static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
1930 struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
1931 {
1932 int err;
1933
1934 if (!vid)
1935 return -EINVAL;
1936
1937 err = _mv88e6xxx_vtu_vid_write(ps, vid - 1);
1938 if (err)
1939 return err;
1940
1941 err = _mv88e6xxx_vtu_getnext(ps, entry);
1942 if (err)
1943 return err;
1944
1945 if (entry->vid != vid || !entry->valid) {
1946 if (!creat)
1947 return -EOPNOTSUPP;
1948 /* -ENOENT would've been more appropriate, but switchdev expects
1949 * -EOPNOTSUPP to inform bridge about an eventual software VLAN.
1950 */
1951
1952 err = _mv88e6xxx_vtu_new(ps, vid, entry);
1953 }
1954
1955 return err;
1956 }
1957
1958 static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
1959 u16 vid_begin, u16 vid_end)
1960 {
1961 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1962 struct mv88e6xxx_vtu_stu_entry vlan;
1963 int i, err;
1964
1965 if (!vid_begin)
1966 return -EOPNOTSUPP;
1967
1968 mutex_lock(&ps->smi_mutex);
1969
1970 err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1);
1971 if (err)
1972 goto unlock;
1973
1974 do {
1975 err = _mv88e6xxx_vtu_getnext(ps, &vlan);
1976 if (err)
1977 goto unlock;
1978
1979 if (!vlan.valid)
1980 break;
1981
1982 if (vlan.vid > vid_end)
1983 break;
1984
1985 for (i = 0; i < ps->info->num_ports; ++i) {
1986 if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
1987 continue;
1988
1989 if (vlan.data[i] ==
1990 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1991 continue;
1992
1993 if (ps->ports[i].bridge_dev ==
1994 ps->ports[port].bridge_dev)
1995 break; /* same bridge, check next VLAN */
1996
1997 netdev_warn(ds->ports[port],
1998 "hardware VLAN %d already used by %s\n",
1999 vlan.vid,
2000 netdev_name(ps->ports[i].bridge_dev));
2001 err = -EOPNOTSUPP;
2002 goto unlock;
2003 }
2004 } while (vlan.vid < vid_end);
2005
2006 unlock:
2007 mutex_unlock(&ps->smi_mutex);
2008
2009 return err;
2010 }
2011
2012 static const char * const mv88e6xxx_port_8021q_mode_names[] = {
2013 [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
2014 [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
2015 [PORT_CONTROL_2_8021Q_CHECK] = "Check",
2016 [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
2017 };
2018
2019 static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
2020 bool vlan_filtering)
2021 {
2022 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2023 u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
2024 PORT_CONTROL_2_8021Q_DISABLED;
2025 int ret;
2026
2027 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
2028 return -EOPNOTSUPP;
2029
2030 mutex_lock(&ps->smi_mutex);
2031
2032 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2);
2033 if (ret < 0)
2034 goto unlock;
2035
2036 old = ret & PORT_CONTROL_2_8021Q_MASK;
2037
2038 if (new != old) {
2039 ret &= ~PORT_CONTROL_2_8021Q_MASK;
2040 ret |= new & PORT_CONTROL_2_8021Q_MASK;
2041
2042 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2,
2043 ret);
2044 if (ret < 0)
2045 goto unlock;
2046
2047 netdev_dbg(ds->ports[port], "802.1Q Mode %s (was %s)\n",
2048 mv88e6xxx_port_8021q_mode_names[new],
2049 mv88e6xxx_port_8021q_mode_names[old]);
2050 }
2051
2052 ret = 0;
2053 unlock:
2054 mutex_unlock(&ps->smi_mutex);
2055
2056 return ret;
2057 }
2058
2059 static int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
2060 const struct switchdev_obj_port_vlan *vlan,
2061 struct switchdev_trans *trans)
2062 {
2063 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2064 int err;
2065
2066 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
2067 return -EOPNOTSUPP;
2068
2069 /* If the requested port doesn't belong to the same bridge as the VLAN
2070 * members, do not support it (yet) and fallback to software VLAN.
2071 */
2072 err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
2073 vlan->vid_end);
2074 if (err)
2075 return err;
2076
2077 /* We don't need any dynamic resource from the kernel (yet),
2078 * so skip the prepare phase.
2079 */
2080 return 0;
2081 }
2082
2083 static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port,
2084 u16 vid, bool untagged)
2085 {
2086 struct mv88e6xxx_vtu_stu_entry vlan;
2087 int err;
2088
2089 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true);
2090 if (err)
2091 return err;
2092
2093 vlan.data[port] = untagged ?
2094 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
2095 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
2096
2097 return _mv88e6xxx_vtu_loadpurge(ps, &vlan);
2098 }
2099
2100 static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
2101 const struct switchdev_obj_port_vlan *vlan,
2102 struct switchdev_trans *trans)
2103 {
2104 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2105 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2106 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2107 u16 vid;
2108
2109 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
2110 return;
2111
2112 mutex_lock(&ps->smi_mutex);
2113
2114 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
2115 if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged))
2116 netdev_err(ds->ports[port], "failed to add VLAN %d%c\n",
2117 vid, untagged ? 'u' : 't');
2118
2119 if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end))
2120 netdev_err(ds->ports[port], "failed to set PVID %d\n",
2121 vlan->vid_end);
2122
2123 mutex_unlock(&ps->smi_mutex);
2124 }
2125
2126 static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
2127 int port, u16 vid)
2128 {
2129 struct dsa_switch *ds = ps->ds;
2130 struct mv88e6xxx_vtu_stu_entry vlan;
2131 int i, err;
2132
2133 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
2134 if (err)
2135 return err;
2136
2137 /* Tell switchdev if this VLAN is handled in software */
2138 if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
2139 return -EOPNOTSUPP;
2140
2141 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
2142
2143 /* keep the VLAN unless all ports are excluded */
2144 vlan.valid = false;
2145 for (i = 0; i < ps->info->num_ports; ++i) {
2146 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
2147 continue;
2148
2149 if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
2150 vlan.valid = true;
2151 break;
2152 }
2153 }
2154
2155 err = _mv88e6xxx_vtu_loadpurge(ps, &vlan);
2156 if (err)
2157 return err;
2158
2159 return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false);
2160 }
2161
2162 static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
2163 const struct switchdev_obj_port_vlan *vlan)
2164 {
2165 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2166 u16 pvid, vid;
2167 int err = 0;
2168
2169 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
2170 return -EOPNOTSUPP;
2171
2172 mutex_lock(&ps->smi_mutex);
2173
2174 err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
2175 if (err)
2176 goto unlock;
2177
2178 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
2179 err = _mv88e6xxx_port_vlan_del(ps, port, vid);
2180 if (err)
2181 goto unlock;
2182
2183 if (vid == pvid) {
2184 err = _mv88e6xxx_port_pvid_set(ps, port, 0);
2185 if (err)
2186 goto unlock;
2187 }
2188 }
2189
2190 unlock:
2191 mutex_unlock(&ps->smi_mutex);
2192
2193 return err;
2194 }
2195
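/* The ATU MAC address registers hold the 48-bit address to operate on as
 * three 16-bit words (bytes 0-1, 2-3 and 4-5).
 */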
2196 static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps,
2197 const unsigned char *addr)
2198 {
2199 int i, ret;
2200
2201 for (i = 0; i < 3; i++) {
2202 ret = _mv88e6xxx_reg_write(
2203 ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
2204 (addr[i * 2] << 8) | addr[i * 2 + 1]);
2205 if (ret < 0)
2206 return ret;
2207 }
2208
2209 return 0;
2210 }
2211
2212 static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps,
2213 unsigned char *addr)
2214 {
2215 int i, ret;
2216
2217 for (i = 0; i < 3; i++) {
2218 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
2219 GLOBAL_ATU_MAC_01 + i);
2220 if (ret < 0)
2221 return ret;
2222 addr[i * 2] = ret >> 8;
2223 addr[i * 2 + 1] = ret & 0xff;
2224 }
2225
2226 return 0;
2227 }
2228
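/* Load (or purge, when the entry state is GLOBAL_ATU_DATA_STATE_UNUSED) an
 * ATU entry: wait for the ATU to become idle, write the MAC address and the
 * entry data, then issue a Load/Purge operation on the entry's FID.
 */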
2229 static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps,
2230 struct mv88e6xxx_atu_entry *entry)
2231 {
2232 int ret;
2233
2234 ret = _mv88e6xxx_atu_wait(ps);
2235 if (ret < 0)
2236 return ret;
2237
2238 ret = _mv88e6xxx_atu_mac_write(ps, entry->mac);
2239 if (ret < 0)
2240 return ret;
2241
2242 ret = _mv88e6xxx_atu_data_write(ps, entry);
2243 if (ret < 0)
2244 return ret;
2245
2246 return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
2247 }
2248
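/* Load a single FDB entry for this port.  VID 0 targets the port's private
 * address database; any other VID targets the database of the corresponding
 * VTU entry.  Passing GLOBAL_ATU_DATA_STATE_UNUSED as the state purges the
 * entry instead of loading it.
 */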
2249 static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
2250 const unsigned char *addr, u16 vid,
2251 u8 state)
2252 {
2253 struct mv88e6xxx_atu_entry entry = { 0 };
2254 struct mv88e6xxx_vtu_stu_entry vlan;
2255 int err;
2256
2257 /* Null VLAN ID corresponds to the port private database */
2258 if (vid == 0)
2259 err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid);
2260 else
2261 err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
2262 if (err)
2263 return err;
2264
2265 entry.fid = vlan.fid;
2266 entry.state = state;
2267 ether_addr_copy(entry.mac, addr);
2268 if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
2269 entry.trunk = false;
2270 entry.portv_trunkid = BIT(port);
2271 }
2272
2273 return _mv88e6xxx_atu_load(ps, &entry);
2274 }
2275
2276 static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
2277 const struct switchdev_obj_port_fdb *fdb,
2278 struct switchdev_trans *trans)
2279 {
2280 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2281
2282 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
2283 return -EOPNOTSUPP;
2284
2285 /* We don't need any dynamic resource from the kernel (yet),
2286 * so skip the prepare phase.
2287 */
2288 return 0;
2289 }
2290
2291 static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
2292 const struct switchdev_obj_port_fdb *fdb,
2293 struct switchdev_trans *trans)
2294 {
2295 int state = is_multicast_ether_addr(fdb->addr) ?
2296 GLOBAL_ATU_DATA_STATE_MC_STATIC :
2297 GLOBAL_ATU_DATA_STATE_UC_STATIC;
2298 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2299
2300 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
2301 return;
2302
2303 mutex_lock(&ps->smi_mutex);
2304 if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state))
2305 netdev_err(ds->ports[port], "failed to load MAC address\n");
2306 mutex_unlock(&ps->smi_mutex);
2307 }
2308
2309 static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
2310 const struct switchdev_obj_port_fdb *fdb)
2311 {
2312 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2313 int ret;
2314
2315 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
2316 return -EOPNOTSUPP;
2317
2318 mutex_lock(&ps->smi_mutex);
2319 ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid,
2320 GLOBAL_ATU_DATA_STATE_UNUSED);
2321 mutex_unlock(&ps->smi_mutex);
2322
2323 return ret;
2324 }
2325
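/* Fetch the next ATU entry of the given FID: wait for the ATU, issue a Get
 * Next operation, then read back the MAC address and decode the entry state
 * and port vector (or trunk ID) from the ATU data register.
 */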
2326 static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
2327 struct mv88e6xxx_atu_entry *entry)
2328 {
2329 struct mv88e6xxx_atu_entry next = { 0 };
2330 int ret;
2331
2332 next.fid = fid;
2333
2334 ret = _mv88e6xxx_atu_wait(ps);
2335 if (ret < 0)
2336 return ret;
2337
2338 ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
2339 if (ret < 0)
2340 return ret;
2341
2342 ret = _mv88e6xxx_atu_mac_read(ps, next.mac);
2343 if (ret < 0)
2344 return ret;
2345
2346 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA);
2347 if (ret < 0)
2348 return ret;
2349
2350 next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
2351 if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
2352 unsigned int mask, shift;
2353
2354 if (ret & GLOBAL_ATU_DATA_TRUNK) {
2355 next.trunk = true;
2356 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
2357 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
2358 } else {
2359 next.trunk = false;
2360 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
2361 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
2362 }
2363
2364 next.portv_trunkid = (ret & mask) >> shift;
2365 }
2366
2367 *entry = next;
2368 return 0;
2369 }
2370
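/* Dump the entries of a single address database.  Starting the Get Next
 * iteration from the broadcast address makes the ATU wrap around to its first
 * entry; iteration stops on an unused entry or once the broadcast address is
 * returned again.
 */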
2371 static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps,
2372 u16 fid, u16 vid, int port,
2373 struct switchdev_obj_port_fdb *fdb,
2374 int (*cb)(struct switchdev_obj *obj))
2375 {
2376 struct mv88e6xxx_atu_entry addr = {
2377 .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
2378 };
2379 int err;
2380
2381 err = _mv88e6xxx_atu_mac_write(ps, addr.mac);
2382 if (err)
2383 return err;
2384
2385 do {
2386 err = _mv88e6xxx_atu_getnext(ps, fid, &addr);
2387 if (err)
2388 break;
2389
2390 if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
2391 break;
2392
2393 if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
2394 bool is_static = addr.state ==
2395 (is_multicast_ether_addr(addr.mac) ?
2396 GLOBAL_ATU_DATA_STATE_MC_STATIC :
2397 GLOBAL_ATU_DATA_STATE_UC_STATIC);
2398
2399 fdb->vid = vid;
2400 ether_addr_copy(fdb->addr, addr.mac);
2401 fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
2402
2403 err = cb(&fdb->obj);
2404 if (err)
2405 break;
2406 }
2407 } while (!is_broadcast_ether_addr(addr.mac));
2408
2409 return err;
2410 }
2411
2412 static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
2413 struct switchdev_obj_port_fdb *fdb,
2414 int (*cb)(struct switchdev_obj *obj))
2415 {
2416 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2417 struct mv88e6xxx_vtu_stu_entry vlan = {
2418 .vid = GLOBAL_VTU_VID_MASK, /* all ones */
2419 };
2420 u16 fid;
2421 int err;
2422
2423 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
2424 return -EOPNOTSUPP;
2425
2426 mutex_lock(&ps->smi_mutex);
2427
2428 /* Dump port's default Filtering Information Database (VLAN ID 0) */
2429 err = _mv88e6xxx_port_fid_get(ps, port, &fid);
2430 if (err)
2431 goto unlock;
2432
2433 err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb);
2434 if (err)
2435 goto unlock;
2436
2437 /* Dump VLANs' Filtering Information Databases */
2438 err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid);
2439 if (err)
2440 goto unlock;
2441
2442 do {
2443 err = _mv88e6xxx_vtu_getnext(ps, &vlan);
2444 if (err)
2445 break;
2446
2447 if (!vlan.valid)
2448 break;
2449
2450 err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port,
2451 fdb, cb);
2452 if (err)
2453 break;
2454 } while (vlan.vid < GLOBAL_VTU_VID_MASK);
2455
2456 unlock:
2457 mutex_unlock(&ps->smi_mutex);
2458
2459 return err;
2460 }
2461
2462 static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2463 struct net_device *bridge)
2464 {
2465 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2466 int i, err = 0;
2467
2468 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
2469 return -EOPNOTSUPP;
2470
2471 mutex_lock(&ps->smi_mutex);
2472
2473 /* Assign the bridge and remap each port's VLANTable */
2474 ps->ports[port].bridge_dev = bridge;
2475
2476 for (i = 0; i < ps->info->num_ports; ++i) {
2477 if (ps->ports[i].bridge_dev == bridge) {
2478 err = _mv88e6xxx_port_based_vlan_map(ps, i);
2479 if (err)
2480 break;
2481 }
2482 }
2483
2484 mutex_unlock(&ps->smi_mutex);
2485
2486 return err;
2487 }
2488
2489 static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
2490 {
2491 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2492 struct net_device *bridge = ps->ports[port].bridge_dev;
2493 int i;
2494
2495 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
2496 return;
2497
2498 mutex_lock(&ps->smi_mutex);
2499
2500 /* Unassign the bridge and remap each port's VLANTable */
2501 ps->ports[port].bridge_dev = NULL;
2502
2503 for (i = 0; i < ps->info->num_ports; ++i)
2504 if (i == port || ps->ports[i].bridge_dev == bridge)
2505 if (_mv88e6xxx_port_based_vlan_map(ps, i))
2506 netdev_warn(ds->ports[i], "failed to remap\n");
2507
2508 mutex_unlock(&ps->smi_mutex);
2509 }
2510
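/* Deferred work that applies pending port STP state changes: for each port
 * flagged in port_state_update_mask, program the state cached in
 * ps->ports[].state into the hardware.
 */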
2511 static void mv88e6xxx_bridge_work(struct work_struct *work)
2512 {
2513 struct mv88e6xxx_priv_state *ps;
2514 struct dsa_switch *ds;
2515 int port;
2516
2517 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
2518 ds = ps->ds;
2519
2520 mutex_lock(&ps->smi_mutex);
2521
2522 for (port = 0; port < ps->info->num_ports; ++port)
2523 if (test_and_clear_bit(port, ps->port_state_update_mask) &&
2524 _mv88e6xxx_port_state(ps, port, ps->ports[port].state))
2525 netdev_warn(ds->ports[port],
2526 "failed to update state to %s\n",
2527 mv88e6xxx_port_state_names[ps->ports[port].state]);
2528
2529 mutex_unlock(&ps->smi_mutex);
2530 }
2531
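/* Indirect PHY accesses to a specific register page.  Register 0x16 holds the
 * page selector; page 0 is always restored before returning, even if the
 * access itself failed.
 */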
2532 static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps,
2533 int port, int page, int reg, int val)
2534 {
2535 int ret;
2536
2537 ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
2538 if (ret < 0)
2539 goto restore_page_0;
2540
2541 ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val);
2542 restore_page_0:
2543 _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
2544
2545 return ret;
2546 }
2547
2548 static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps,
2549 int port, int page, int reg)
2550 {
2551 int ret;
2552
2553 ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
2554 if (ret < 0)
2555 goto restore_page_0;
2556
2557 ret = _mv88e6xxx_phy_read_indirect(ps, port, reg);
2558 restore_page_0:
2559 _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
2560
2561 return ret;
2562 }
2563
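/* Reset the switch: disable all ports, wait for the transmit queues to drain,
 * pulse the reset GPIO if one is provided, then issue a software reset through
 * global register 0x04 and poll the global status register until the
 * reset-complete bits are set (with a one second timeout).
 */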
2564 static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps)
2565 {
2566 bool ppu_active = mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE);
2567 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2568 struct gpio_desc *gpiod = ps->ds->pd->reset;
2569 unsigned long timeout;
2570 int ret;
2571 int i;
2572
2573 /* Set all ports to the disabled state. */
2574 for (i = 0; i < ps->info->num_ports; i++) {
2575 ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL);
2576 if (ret < 0)
2577 return ret;
2578
2579 ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL,
2580 ret & 0xfffc);
2581 if (ret)
2582 return ret;
2583 }
2584
2585 /* Wait for transmit queues to drain. */
2586 usleep_range(2000, 4000);
2587
2588 /* If there is a gpio connected to the reset pin, toggle it */
2589 if (gpiod) {
2590 gpiod_set_value_cansleep(gpiod, 1);
2591 usleep_range(10000, 20000);
2592 gpiod_set_value_cansleep(gpiod, 0);
2593 usleep_range(10000, 20000);
2594 }
2595
2596 /* Reset the switch. Keep the PPU active if requested. The PPU
2597 * needs to be active to support indirect phy register access
2598 * through global registers 0x18 and 0x19.
2599 */
2600 if (ppu_active)
2601 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000);
2602 else
2603 ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400);
2604 if (ret)
2605 return ret;
2606
2607 /* Wait up to one second for reset to complete. */
2608 timeout = jiffies + 1 * HZ;
2609 while (time_before(jiffies, timeout)) {
2610 ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00);
2611 if (ret < 0)
2612 return ret;
2613
2614 if ((ret & is_reset) == is_reset)
2615 break;
2616 usleep_range(1000, 2000);
2617 }
2618 if (time_after(jiffies, timeout))
2619 ret = -ETIMEDOUT;
2620 else
2621 ret = 0;
2622
2623 return ret;
2624 }
2625
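/* Power up the SerDes interface if needed: read MII_BMCR from the
 * fiber/SerDes page and clear BMCR_PDOWN if it is currently set.
 */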
2626 static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps)
2627 {
2628 int ret;
2629
2630 ret = _mv88e6xxx_phy_page_read(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
2631 MII_BMCR);
2632 if (ret < 0)
2633 return ret;
2634
2635 if (ret & BMCR_PDOWN) {
2636 ret &= ~BMCR_PDOWN;
2637 ret = _mv88e6xxx_phy_page_write(ps, REG_FIBER_SERDES,
2638 PAGE_FIBER_SERDES, MII_BMCR,
2639 ret);
2640 }
2641
2642 return ret;
2643 }
2644
2645 static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
2646 {
2647 struct dsa_switch *ds = ps->ds;
2648 int ret;
2649 u16 reg;
2650
2651 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2652 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2653 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
2654 mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) {
2655 /* MAC Forcing register: don't force link, speed,
2656 * duplex or flow control state to any particular
2657 * values on physical ports, but force the CPU port
2658 * and all DSA ports to their maximum bandwidth and
2659 * full duplex.
2660 */
2661 		ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
		if (ret < 0)
			return ret;
		reg = ret;
2662 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
2663 reg &= ~PORT_PCS_CTRL_UNFORCED;
2664 reg |= PORT_PCS_CTRL_FORCE_LINK |
2665 PORT_PCS_CTRL_LINK_UP |
2666 PORT_PCS_CTRL_DUPLEX_FULL |
2667 PORT_PCS_CTRL_FORCE_DUPLEX;
2668 if (mv88e6xxx_6065_family(ps))
2669 reg |= PORT_PCS_CTRL_100;
2670 else
2671 reg |= PORT_PCS_CTRL_1000;
2672 } else {
2673 reg |= PORT_PCS_CTRL_UNFORCED;
2674 }
2675
2676 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2677 PORT_PCS_CTRL, reg);
2678 if (ret)
2679 return ret;
2680 }
2681
2682 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2683 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2684 * tunneling, determine priority by looking at 802.1p and IP
2685 * priority fields (IP prio has precedence), and set STP state
2686 * to Forwarding.
2687 *
2688 * If this is the CPU link, use DSA or EDSA tagging depending
2689 * on which tagging mode was configured.
2690 *
2691 * If this is a link to another switch, use DSA tagging mode.
2692 *
2693 * If this is the upstream port for this switch, enable
2694 * forwarding of unknown unicasts and multicasts.
2695 */
2696 reg = 0;
2697 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2698 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2699 mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
2700 mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps))
2701 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
2702 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
2703 PORT_CONTROL_STATE_FORWARDING;
2704 if (dsa_is_cpu_port(ds, port)) {
2705 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
2706 reg |= PORT_CONTROL_DSA_TAG;
2707 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2708 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2709 mv88e6xxx_6320_family(ps)) {
2710 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2711 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
2712 else
2713 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2714 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2715 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2716 }
2717
2718 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2719 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2720 mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
2721 mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) {
2722 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2723 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
2724 }
2725 }
2726 if (dsa_is_dsa_port(ds, port)) {
2727 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
2728 reg |= PORT_CONTROL_DSA_TAG;
2729 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2730 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2731 mv88e6xxx_6320_family(ps)) {
2732 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2733 }
2734
2735 if (port == dsa_upstream_port(ds))
2736 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2737 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2738 }
2739 if (reg) {
2740 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2741 PORT_CONTROL, reg);
2742 if (ret)
2743 return ret;
2744 }
2745
2746 /* If this port is connected to a SerDes, make sure the SerDes is not
2747 * powered down.
2748 */
2749 if (mv88e6xxx_6352_family(ps)) {
2750 ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
2751 if (ret < 0)
2752 return ret;
2753 ret &= PORT_STATUS_CMODE_MASK;
2754 if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
2755 (ret == PORT_STATUS_CMODE_1000BASE_X) ||
2756 (ret == PORT_STATUS_CMODE_SGMII)) {
2757 ret = mv88e6xxx_power_on_serdes(ps);
2758 if (ret < 0)
2759 return ret;
2760 }
2761 }
2762
2763 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2764 	 * 10240 bytes, disable 802.1Q tag checking, don't discard tagged or
2765 * untagged frames on this port, do a destination address lookup on all
2766 * received packets as usual, disable ARP mirroring and don't send a
2767 * copy of all transmitted/received frames on this port to the CPU.
2768 */
2769 reg = 0;
2770 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2771 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2772 mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) ||
2773 mv88e6xxx_6185_family(ps))
2774 reg = PORT_CONTROL_2_MAP_DA;
2775
2776 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2777 mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps))
2778 reg |= PORT_CONTROL_2_JUMBO_10240;
2779
2780 if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) {
2781 /* Set the upstream port this port should use */
2782 reg |= dsa_upstream_port(ds);
2783 /* enable forwarding of unknown multicast addresses to
2784 * the upstream port
2785 */
2786 if (port == dsa_upstream_port(ds))
2787 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
2788 }
2789
2790 reg |= PORT_CONTROL_2_8021Q_DISABLED;
2791
2792 if (reg) {
2793 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2794 PORT_CONTROL_2, reg);
2795 if (ret)
2796 return ret;
2797 }
2798
2799 /* Port Association Vector: when learning source addresses
2800 * of packets, add the address to the address database using
2801 * a port bitmap that has only the bit for this port set and
2802 * the other bits clear.
2803 */
2804 reg = 1 << port;
2805 /* Disable learning for CPU port */
2806 if (dsa_is_cpu_port(ds, port))
2807 reg = 0;
2808
2809 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
2810 if (ret)
2811 return ret;
2812
2813 /* Egress rate control 2: disable egress rate control. */
2814 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2,
2815 0x0000);
2816 if (ret)
2817 return ret;
2818
2819 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2820 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2821 mv88e6xxx_6320_family(ps)) {
2822 /* Do not limit the period of time that this port can
2823 * be paused for by the remote end or the period of
2824 * time that this port can pause the remote end.
2825 */
2826 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2827 PORT_PAUSE_CTRL, 0x0000);
2828 if (ret)
2829 return ret;
2830
2831 /* Port ATU control: disable limiting the number of
2832 * address database entries that this port is allowed
2833 * to use.
2834 */
2835 		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2836 					   PORT_ATU_CONTROL, 0x0000);
		if (ret)
			return ret;

2837 /* Priority Override: disable DA, SA and VTU priority
2838 * override.
2839 */
2840 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2841 PORT_PRI_OVERRIDE, 0x0000);
2842 if (ret)
2843 return ret;
2844
2845 /* Port Ethertype: use the Ethertype DSA Ethertype
2846 * value.
2847 */
2848 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2849 PORT_ETH_TYPE, ETH_P_EDSA);
2850 if (ret)
2851 return ret;
2852 /* Tag Remap: use an identity 802.1p prio -> switch
2853 * prio mapping.
2854 */
2855 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2856 PORT_TAG_REGMAP_0123, 0x3210);
2857 if (ret)
2858 return ret;
2859
2860 /* Tag Remap 2: use an identity 802.1p prio -> switch
2861 * prio mapping.
2862 */
2863 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2864 PORT_TAG_REGMAP_4567, 0x7654);
2865 if (ret)
2866 return ret;
2867 }
2868
2869 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
2870 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
2871 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
2872 mv88e6xxx_6320_family(ps)) {
2873 /* Rate Control: disable ingress rate limiting. */
2874 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
2875 PORT_RATE_CONTROL, 0x0001);
2876 if (ret)
2877 return ret;
2878 }
2879
2880 /* Port Control 1: disable trunking, disable sending
2881 * learning messages to this port.
2882 */
2883 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000);
2884 if (ret)
2885 return ret;
2886
2887 /* Port based VLAN map: give each port the same default address
2888 * database, and allow bidirectional communication between the
2889 * CPU and DSA port(s), and the other ports.
2890 */
2891 ret = _mv88e6xxx_port_fid_set(ps, port, 0);
2892 if (ret)
2893 return ret;
2894
2895 ret = _mv88e6xxx_port_based_vlan_map(ps, port);
2896 if (ret)
2897 return ret;
2898
2899 /* Default VLAN ID and priority: don't set a default VLAN
2900 * ID, and set the default packet priority to zero.
2901 */
2902 ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN,
2903 0x0000);
2904 if (ret)
2905 return ret;
2906
2907 return 0;
2908 }
2909
2910 static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
2911 {
2912 struct dsa_switch *ds = ps->ds;
2913 u32 upstream_port = dsa_upstream_port(ds);
2914 u16 reg;
2915 int err;
2916 int i;
2917
2918 /* Enable the PHY Polling Unit if present, don't discard any packets,
2919 * and mask all interrupt sources.
2920 */
2921 reg = 0;
2922 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU) ||
2923 mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE))
2924 reg |= GLOBAL_CONTROL_PPU_ENABLE;
2925
2926 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, reg);
2927 if (err)
2928 return err;
2929
2930 /* Configure the upstream port, and configure it as the port to which
2931 	 * ingress, egress and ARP monitor frames are to be sent.
2932 */
2933 reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
2934 upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
2935 upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
2936 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
2937 if (err)
2938 return err;
2939
2940 /* Disable remote management, and set the switch's DSA device number. */
2941 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2,
2942 GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
2943 (ds->index & 0x1f));
2944 if (err)
2945 return err;
2946
2947 /* Set the default address aging time to 5 minutes, and
2948 * enable address learn messages to be sent to all message
2949 * ports.
2950 */
2951 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
2952 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
2953 if (err)
2954 return err;
2955
2956 /* Configure the IP ToS mapping registers. */
2957 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2958 if (err)
2959 return err;
2960 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2961 if (err)
2962 return err;
2963 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2964 if (err)
2965 return err;
2966 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2967 if (err)
2968 return err;
2969 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2970 if (err)
2971 return err;
2972 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2973 if (err)
2974 return err;
2975 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2976 if (err)
2977 return err;
2978 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2979 if (err)
2980 return err;
2981
2982 /* Configure the IEEE 802.1p priority mapping register. */
2983 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
2984 if (err)
2985 return err;
2986
2987 /* Send all frames with destination addresses matching
2988 * 01:80:c2:00:00:0x to the CPU port.
2989 */
2990 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
2991 if (err)
2992 return err;
2993
2994 /* Ignore removed tag data on doubly tagged packets, disable
2995 * flow control messages, force flow control priority to the
2996 * highest, and send all special multicast frames to the CPU
2997 * port at the highest priority.
2998 */
2999 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
3000 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
3001 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
3002 if (err)
3003 return err;
3004
3005 /* Program the DSA routing table. */
3006 for (i = 0; i < 32; i++) {
3007 int nexthop = 0x1f;
3008
3009 if (ps->ds->pd->rtable &&
3010 i != ps->ds->index && i < ps->ds->dst->pd->nr_chips)
3011 nexthop = ps->ds->pd->rtable[i] & 0x1f;
3012
3013 err = _mv88e6xxx_reg_write(
3014 ps, REG_GLOBAL2,
3015 GLOBAL2_DEVICE_MAPPING,
3016 GLOBAL2_DEVICE_MAPPING_UPDATE |
3017 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop);
3018 if (err)
3019 return err;
3020 }
3021
3022 /* Clear all trunk masks. */
3023 for (i = 0; i < 8; i++) {
3024 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
3025 0x8000 |
3026 (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
3027 ((1 << ps->info->num_ports) - 1));
3028 if (err)
3029 return err;
3030 }
3031
3032 /* Clear all trunk mappings. */
3033 for (i = 0; i < 16; i++) {
3034 err = _mv88e6xxx_reg_write(
3035 ps, REG_GLOBAL2,
3036 GLOBAL2_TRUNK_MAPPING,
3037 GLOBAL2_TRUNK_MAPPING_UPDATE |
3038 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
3039 if (err)
3040 return err;
3041 }
3042
3043 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
3044 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
3045 mv88e6xxx_6320_family(ps)) {
3046 /* Send all frames with destination addresses matching
3047 * 01:80:c2:00:00:2x to the CPU port.
3048 */
3049 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
3050 GLOBAL2_MGMT_EN_2X, 0xffff);
3051 if (err)
3052 return err;
3053
3054 /* Initialise cross-chip port VLAN table to reset
3055 * defaults.
3056 */
3057 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
3058 GLOBAL2_PVT_ADDR, 0x9000);
3059 if (err)
3060 return err;
3061
3062 /* Clear the priority override table. */
3063 for (i = 0; i < 16; i++) {
3064 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
3065 GLOBAL2_PRIO_OVERRIDE,
3066 0x8000 | (i << 8));
3067 if (err)
3068 return err;
3069 }
3070 }
3071
3072 if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
3073 mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
3074 mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
3075 mv88e6xxx_6320_family(ps)) {
3076 /* Disable ingress rate limiting by resetting all
3077 * ingress rate limit registers to their initial
3078 * state.
3079 */
3080 for (i = 0; i < ps->info->num_ports; i++) {
3081 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
3082 GLOBAL2_INGRESS_OP,
3083 0x9000 | (i << 8));
3084 if (err)
3085 return err;
3086 }
3087 }
3088
3089 /* Clear the statistics counters for all ports */
3090 err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
3091 GLOBAL_STATS_OP_FLUSH_ALL);
3092 if (err)
3093 return err;
3094
3095 /* Wait for the flush to complete. */
3096 err = _mv88e6xxx_stats_wait(ps);
3097 if (err)
3098 return err;
3099
3100 /* Clear all ATU entries */
3101 err = _mv88e6xxx_atu_flush(ps, 0, true);
3102 if (err)
3103 return err;
3104
3105 /* Clear all the VTU and STU entries */
3106 	return _mv88e6xxx_vtu_stu_flush(ps);
3111 }
3112
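/* Top-level switch setup: initialise the driver state and locks, reset the
 * switch, program the global registers, then configure each port.
 */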
3113 static int mv88e6xxx_setup(struct dsa_switch *ds)
3114 {
3115 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3116 int err;
3117 int i;
3118
3119 ps->ds = ds;
3120
3121 mutex_init(&ps->smi_mutex);
3122
3123 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
3124
3125 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
3126 mutex_init(&ps->eeprom_mutex);
3127
3128 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
3129 mv88e6xxx_ppu_state_init(ps);
3130
3131 mutex_lock(&ps->smi_mutex);
3132
3133 err = mv88e6xxx_switch_reset(ps);
3134 if (err)
3135 goto unlock;
3136
3137 err = mv88e6xxx_setup_global(ps);
3138 if (err)
3139 goto unlock;
3140
3141 for (i = 0; i < ps->info->num_ports; i++) {
3142 err = mv88e6xxx_setup_port(ps, i);
3143 if (err)
3144 goto unlock;
3145 }
3146
3147 unlock:
3148 mutex_unlock(&ps->smi_mutex);
3149
3150 return err;
3151 }
3152
3153 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
3154 {
3155 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3156 int ret;
3157
3158 mutex_lock(&ps->smi_mutex);
3159 ret = _mv88e6xxx_phy_page_read(ps, port, page, reg);
3160 mutex_unlock(&ps->smi_mutex);
3161
3162 return ret;
3163 }
3164
3165 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
3166 int reg, int val)
3167 {
3168 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3169 int ret;
3170
3171 mutex_lock(&ps->smi_mutex);
3172 ret = _mv88e6xxx_phy_page_write(ps, port, page, reg, val);
3173 mutex_unlock(&ps->smi_mutex);
3174
3175 return ret;
3176 }
3177
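/* The internal PHYs are addressed by port number; reject requests for ports
 * outside the device's port range.
 */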
3178 static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps,
3179 int port)
3180 {
3181 if (port >= 0 && port < ps->info->num_ports)
3182 return port;
3183 return -EINVAL;
3184 }
3185
3186 static int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
3187 {
3188 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3189 int addr = mv88e6xxx_port_to_phy_addr(ps, port);
3190 int ret;
3191
3192 if (addr < 0)
3193 return 0xffff;
3194
3195 mutex_lock(&ps->smi_mutex);
3196
3197 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
3198 ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum);
3199 else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
3200 ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum);
3201 else
3202 ret = _mv88e6xxx_phy_read(ps, addr, regnum);
3203
3204 mutex_unlock(&ps->smi_mutex);
3205 return ret;
3206 }
3207
3208 static int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum,
3209 u16 val)
3210 {
3211 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3212 int addr = mv88e6xxx_port_to_phy_addr(ps, port);
3213 int ret;
3214
3215 if (addr < 0)
3216 return 0xffff;
3217
3218 mutex_lock(&ps->smi_mutex);
3219
3220 if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
3221 ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val);
3222 else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
3223 ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val);
3224 else
3225 ret = _mv88e6xxx_phy_write(ps, addr, regnum, val);
3226
3227 mutex_unlock(&ps->smi_mutex);
3228 return ret;
3229 }
3230
3231 #ifdef CONFIG_NET_DSA_HWMON
3232
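/* Read the internal temperature sensor through PHY 0: select page 6 (via
 * register 0x16), set bit 5 of register 26 (0x1a) to enable the sensor, wait
 * for the reading to stabilize, read it back and convert the low 5 bits as
 * ((val & 0x1f) - 5) * 5.
 */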
3233 static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
3234 {
3235 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3236 int ret;
3237 int val;
3238
3239 *temp = 0;
3240
3241 mutex_lock(&ps->smi_mutex);
3242
3243 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x6);
3244 if (ret < 0)
3245 goto error;
3246
3247 /* Enable temperature sensor */
3248 ret = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
3249 if (ret < 0)
3250 goto error;
3251
3252 ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret | (1 << 5));
3253 if (ret < 0)
3254 goto error;
3255
3256 /* Wait for temperature to stabilize */
3257 usleep_range(10000, 12000);
3258
3259 val = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
3260 if (val < 0) {
3261 ret = val;
3262 goto error;
3263 }
3264
3265 /* Disable temperature sensor */
3266 	ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, val & ~(1 << 5));
3267 if (ret < 0)
3268 goto error;
3269
3270 *temp = ((val & 0x1f) - 5) * 5;
3271
3272 error:
3273 _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x0);
3274 mutex_unlock(&ps->smi_mutex);
3275 return ret;
3276 }
3277
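/* The 6320 and 6352 families report the temperature directly in page 6,
 * register 27 of the sensor PHY (PHY 3 on 6320, PHY 0 otherwise); the raw
 * reading is offset by 25.
 */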
3278 static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
3279 {
3280 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3281 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3282 int ret;
3283
3284 *temp = 0;
3285
3286 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
3287 if (ret < 0)
3288 return ret;
3289
3290 *temp = (ret & 0xff) - 25;
3291
3292 return 0;
3293 }
3294
3295 static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
3296 {
3297 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3298
3299 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP))
3300 return -EOPNOTSUPP;
3301
3302 if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
3303 return mv88e63xx_get_temp(ds, temp);
3304
3305 return mv88e61xx_get_temp(ds, temp);
3306 }
3307
3308 static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
3309 {
3310 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3311 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3312 int ret;
3313
3314 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
3315 return -EOPNOTSUPP;
3316
3317 *temp = 0;
3318
3319 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
3320 if (ret < 0)
3321 return ret;
3322
3323 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
3324
3325 return 0;
3326 }
3327
3328 static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
3329 {
3330 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3331 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3332 int ret;
3333
3334 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
3335 return -EOPNOTSUPP;
3336
3337 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
3338 if (ret < 0)
3339 return ret;
3340 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
3341 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
3342 (ret & 0xe0ff) | (temp << 8));
3343 }
3344
3345 static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
3346 {
3347 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
3348 int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
3349 int ret;
3350
3351 if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
3352 return -EOPNOTSUPP;
3353
3354 *alarm = false;
3355
3356 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
3357 if (ret < 0)
3358 return ret;
3359
3360 *alarm = !!(ret & 0x40);
3361
3362 return 0;
3363 }
3364 #endif /* CONFIG_NET_DSA_HWMON */
3365
3366 static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3367 [MV88E6085] = {
3368 .prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
3369 .family = MV88E6XXX_FAMILY_6097,
3370 .name = "Marvell 88E6085",
3371 .num_databases = 4096,
3372 .num_ports = 10,
3373 .flags = MV88E6XXX_FLAGS_FAMILY_6097,
3374 },
3375
3376 [MV88E6095] = {
3377 .prod_num = PORT_SWITCH_ID_PROD_NUM_6095,
3378 .family = MV88E6XXX_FAMILY_6095,
3379 .name = "Marvell 88E6095/88E6095F",
3380 .num_databases = 256,
3381 .num_ports = 11,
3382 .flags = MV88E6XXX_FLAGS_FAMILY_6095,
3383 },
3384
3385 [MV88E6123] = {
3386 .prod_num = PORT_SWITCH_ID_PROD_NUM_6123,
3387 .family = MV88E6XXX_FAMILY_6165,
3388 .name = "Marvell 88E6123",
3389 .num_databases = 4096,
3390 .num_ports = 3,
3391 .flags = MV88E6XXX_FLAGS_FAMILY_6165,
3392 },
3393
3394 [MV88E6131] = {
3395 .prod_num = PORT_SWITCH_ID_PROD_NUM_6131,
3396 .family = MV88E6XXX_FAMILY_6185,
3397 .name = "Marvell 88E6131",
3398 .num_databases = 256,
3399 .num_ports = 8,
3400 .flags = MV88E6XXX_FLAGS_FAMILY_6185,
3401 },
3402
3403 [MV88E6161] = {
3404 .prod_num = PORT_SWITCH_ID_PROD_NUM_6161,
3405 .family = MV88E6XXX_FAMILY_6165,
3406 .name = "Marvell 88E6161",
3407 .num_databases = 4096,
3408 .num_ports = 6,
3409 .flags = MV88E6XXX_FLAGS_FAMILY_6165,
3410 },
3411
3412 [MV88E6165] = {
3413 .prod_num = PORT_SWITCH_ID_PROD_NUM_6165,
3414 .family = MV88E6XXX_FAMILY_6165,
3415 .name = "Marvell 88E6165",
3416 .num_databases = 4096,
3417 .num_ports = 6,
3418 .flags = MV88E6XXX_FLAGS_FAMILY_6165,
3419 },
3420
3421 [MV88E6171] = {
3422 .prod_num = PORT_SWITCH_ID_PROD_NUM_6171,
3423 .family = MV88E6XXX_FAMILY_6351,
3424 .name = "Marvell 88E6171",
3425 .num_databases = 4096,
3426 .num_ports = 7,
3427 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3428 },
3429
3430 [MV88E6172] = {
3431 .prod_num = PORT_SWITCH_ID_PROD_NUM_6172,
3432 .family = MV88E6XXX_FAMILY_6352,
3433 .name = "Marvell 88E6172",
3434 .num_databases = 4096,
3435 .num_ports = 7,
3436 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3437 },
3438
3439 [MV88E6175] = {
3440 .prod_num = PORT_SWITCH_ID_PROD_NUM_6175,
3441 .family = MV88E6XXX_FAMILY_6351,
3442 .name = "Marvell 88E6175",
3443 .num_databases = 4096,
3444 .num_ports = 7,
3445 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3446 },
3447
3448 [MV88E6176] = {
3449 .prod_num = PORT_SWITCH_ID_PROD_NUM_6176,
3450 .family = MV88E6XXX_FAMILY_6352,
3451 .name = "Marvell 88E6176",
3452 .num_databases = 4096,
3453 .num_ports = 7,
3454 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3455 },
3456
3457 [MV88E6185] = {
3458 .prod_num = PORT_SWITCH_ID_PROD_NUM_6185,
3459 .family = MV88E6XXX_FAMILY_6185,
3460 .name = "Marvell 88E6185",
3461 .num_databases = 256,
3462 .num_ports = 10,
3463 .flags = MV88E6XXX_FLAGS_FAMILY_6185,
3464 },
3465
3466 [MV88E6240] = {
3467 .prod_num = PORT_SWITCH_ID_PROD_NUM_6240,
3468 .family = MV88E6XXX_FAMILY_6352,
3469 .name = "Marvell 88E6240",
3470 .num_databases = 4096,
3471 .num_ports = 7,
3472 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3473 },
3474
3475 [MV88E6320] = {
3476 .prod_num = PORT_SWITCH_ID_PROD_NUM_6320,
3477 .family = MV88E6XXX_FAMILY_6320,
3478 .name = "Marvell 88E6320",
3479 .num_databases = 4096,
3480 .num_ports = 7,
3481 .flags = MV88E6XXX_FLAGS_FAMILY_6320,
3482 },
3483
3484 [MV88E6321] = {
3485 .prod_num = PORT_SWITCH_ID_PROD_NUM_6321,
3486 .family = MV88E6XXX_FAMILY_6320,
3487 .name = "Marvell 88E6321",
3488 .num_databases = 4096,
3489 .num_ports = 7,
3490 .flags = MV88E6XXX_FLAGS_FAMILY_6320,
3491 },
3492
3493 [MV88E6350] = {
3494 .prod_num = PORT_SWITCH_ID_PROD_NUM_6350,
3495 .family = MV88E6XXX_FAMILY_6351,
3496 .name = "Marvell 88E6350",
3497 .num_databases = 4096,
3498 .num_ports = 7,
3499 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3500 },
3501
3502 [MV88E6351] = {
3503 .prod_num = PORT_SWITCH_ID_PROD_NUM_6351,
3504 .family = MV88E6XXX_FAMILY_6351,
3505 .name = "Marvell 88E6351",
3506 .num_databases = 4096,
3507 .num_ports = 7,
3508 .flags = MV88E6XXX_FLAGS_FAMILY_6351,
3509 },
3510
3511 [MV88E6352] = {
3512 .prod_num = PORT_SWITCH_ID_PROD_NUM_6352,
3513 .family = MV88E6XXX_FAMILY_6352,
3514 .name = "Marvell 88E6352",
3515 .num_databases = 4096,
3516 .num_ports = 7,
3517 .flags = MV88E6XXX_FLAGS_FAMILY_6352,
3518 },
3519 };
3520
3521 static const struct mv88e6xxx_info *
3522 mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table,
3523 unsigned int num)
3524 {
3525 int i;
3526
3527 for (i = 0; i < num; ++i)
3528 if (table[i].prod_num == prod_num)
3529 return &table[i];
3530
3531 return NULL;
3532 }
3533
3534 static const char *mv88e6xxx_probe(struct device *dsa_dev,
3535 struct device *host_dev, int sw_addr,
3536 void **priv)
3537 {
3538 const struct mv88e6xxx_info *info;
3539 struct mv88e6xxx_priv_state *ps;
3540 struct mii_bus *bus;
3541 const char *name;
3542 int id, prod_num, rev;
3543
3544 bus = dsa_host_dev_to_mii_bus(host_dev);
3545 if (!bus)
3546 return NULL;
3547
3548 id = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
3549 if (id < 0)
3550 return NULL;
3551
3552 prod_num = (id & 0xfff0) >> 4;
3553 rev = id & 0x000f;
3554
3555 info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
3556 ARRAY_SIZE(mv88e6xxx_table));
3557 if (!info)
3558 return NULL;
3559
3560 name = info->name;
3561
3562 ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
3563 if (!ps)
3564 return NULL;
3565
3566 ps->bus = bus;
3567 ps->sw_addr = sw_addr;
3568 ps->info = info;
3569
3570 *priv = ps;
3571
3572 dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n",
3573 prod_num, name, rev);
3574
3575 return name;
3576 }
3577
3578 struct dsa_switch_driver mv88e6xxx_switch_driver = {
3579 .tag_protocol = DSA_TAG_PROTO_EDSA,
3580 .probe = mv88e6xxx_probe,
3581 .setup = mv88e6xxx_setup,
3582 .set_addr = mv88e6xxx_set_addr,
3583 .phy_read = mv88e6xxx_phy_read,
3584 .phy_write = mv88e6xxx_phy_write,
3585 .adjust_link = mv88e6xxx_adjust_link,
3586 .get_strings = mv88e6xxx_get_strings,
3587 .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
3588 .get_sset_count = mv88e6xxx_get_sset_count,
3589 .set_eee = mv88e6xxx_set_eee,
3590 .get_eee = mv88e6xxx_get_eee,
3591 #ifdef CONFIG_NET_DSA_HWMON
3592 .get_temp = mv88e6xxx_get_temp,
3593 .get_temp_limit = mv88e6xxx_get_temp_limit,
3594 .set_temp_limit = mv88e6xxx_set_temp_limit,
3595 .get_temp_alarm = mv88e6xxx_get_temp_alarm,
3596 #endif
3597 .get_eeprom = mv88e6xxx_get_eeprom,
3598 .set_eeprom = mv88e6xxx_set_eeprom,
3599 .get_regs_len = mv88e6xxx_get_regs_len,
3600 .get_regs = mv88e6xxx_get_regs,
3601 .port_bridge_join = mv88e6xxx_port_bridge_join,
3602 .port_bridge_leave = mv88e6xxx_port_bridge_leave,
3603 .port_stp_state_set = mv88e6xxx_port_stp_state_set,
3604 .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
3605 .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
3606 .port_vlan_add = mv88e6xxx_port_vlan_add,
3607 .port_vlan_del = mv88e6xxx_port_vlan_del,
3608 .port_vlan_dump = mv88e6xxx_port_vlan_dump,
3609 .port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
3610 .port_fdb_add = mv88e6xxx_port_fdb_add,
3611 .port_fdb_del = mv88e6xxx_port_fdb_del,
3612 .port_fdb_dump = mv88e6xxx_port_fdb_dump,
3613 };
3614
3615 static int __init mv88e6xxx_init(void)
3616 {
3617 register_switch_driver(&mv88e6xxx_switch_driver);
3618
3619 return 0;
3620 }
3621 module_init(mv88e6xxx_init);
3622
3623 static void __exit mv88e6xxx_cleanup(void)
3624 {
3625 unregister_switch_driver(&mv88e6xxx_switch_driver);
3626 }
3627 module_exit(mv88e6xxx_cleanup);
3628
3629 MODULE_ALIAS("platform:mv88e6085");
3630 MODULE_ALIAS("platform:mv88e6095");
3631 MODULE_ALIAS("platform:mv88e6095f");
3632 MODULE_ALIAS("platform:mv88e6123");
3633 MODULE_ALIAS("platform:mv88e6131");
3634 MODULE_ALIAS("platform:mv88e6161");
3635 MODULE_ALIAS("platform:mv88e6165");
3636 MODULE_ALIAS("platform:mv88e6171");
3637 MODULE_ALIAS("platform:mv88e6172");
3638 MODULE_ALIAS("platform:mv88e6175");
3639 MODULE_ALIAS("platform:mv88e6176");
3640 MODULE_ALIAS("platform:mv88e6320");
3641 MODULE_ALIAS("platform:mv88e6321");
3642 MODULE_ALIAS("platform:mv88e6350");
3643 MODULE_ALIAS("platform:mv88e6351");
3644 MODULE_ALIAS("platform:mv88e6352");
3645 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
3646 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
3647 MODULE_LICENSE("GPL");