2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
3 * Copyright (c) 2008 Marvell Semiconductor
5 * Copyright (c) 2015 CMC Electronics, Inc.
6 * Added support for VLAN Table Unit operations
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/delay.h>
15 #include <linux/etherdevice.h>
16 #include <linux/ethtool.h>
17 #include <linux/if_bridge.h>
18 #include <linux/jiffies.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/netdevice.h>
22 #include <linux/gpio/consumer.h>
23 #include <linux/phy.h>
25 #include <net/switchdev.h>
26 #include "mv88e6xxx.h"
/* Debug helper: warn loudly if a caller reached an SMI register accessor
 * without holding ps->smi_mutex.  (Fragment — enclosing braces are missing
 * from this extraction; original control flow continues off-view.)
 */
28 static void assert_smi_lock(struct mv88e6xxx_priv_state
*ps
)
30 if (unlikely(!mutex_is_locked(&ps
->smi_mutex
))) {
31 dev_err(ps
->dev
, "SMI lock not held!\n");
36 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
37 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
38 * will be directly accessible on some {device address,register address}
39 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
40 * will only respond to SMI transactions to that specific address, and
41 * an indirect addressing mechanism needs to be used to access its
44 static int mv88e6xxx_reg_wait_ready(struct mii_bus
*bus
, int sw_addr
)
49 for (i
= 0; i
< 16; i
++) {
50 ret
= mdiobus_read_nested(bus
, sw_addr
, SMI_CMD
);
54 if ((ret
& SMI_CMD_BUSY
) == 0)
61 static int __mv88e6xxx_reg_read(struct mii_bus
*bus
, int sw_addr
, int addr
,
67 return mdiobus_read_nested(bus
, addr
, reg
);
69 /* Wait for the bus to become free. */
70 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
74 /* Transmit the read command. */
75 ret
= mdiobus_write_nested(bus
, sw_addr
, SMI_CMD
,
76 SMI_CMD_OP_22_READ
| (addr
<< 5) | reg
);
80 /* Wait for the read command to complete. */
81 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
86 ret
= mdiobus_read_nested(bus
, sw_addr
, SMI_DATA
);
93 static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state
*ps
,
100 ret
= __mv88e6xxx_reg_read(ps
->bus
, ps
->sw_addr
, addr
, reg
);
104 dev_dbg(ps
->dev
, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
110 int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state
*ps
, int addr
, int reg
)
114 mutex_lock(&ps
->smi_mutex
);
115 ret
= _mv88e6xxx_reg_read(ps
, addr
, reg
);
116 mutex_unlock(&ps
->smi_mutex
);
121 static int __mv88e6xxx_reg_write(struct mii_bus
*bus
, int sw_addr
, int addr
,
127 return mdiobus_write_nested(bus
, addr
, reg
, val
);
129 /* Wait for the bus to become free. */
130 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
134 /* Transmit the data to write. */
135 ret
= mdiobus_write_nested(bus
, sw_addr
, SMI_DATA
, val
);
139 /* Transmit the write command. */
140 ret
= mdiobus_write_nested(bus
, sw_addr
, SMI_CMD
,
141 SMI_CMD_OP_22_WRITE
| (addr
<< 5) | reg
);
145 /* Wait for the write command to complete. */
146 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
153 static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state
*ps
, int addr
,
158 dev_dbg(ps
->dev
, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
161 return __mv88e6xxx_reg_write(ps
->bus
, ps
->sw_addr
, addr
, reg
, val
);
164 int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state
*ps
, int addr
,
169 mutex_lock(&ps
->smi_mutex
);
170 ret
= _mv88e6xxx_reg_write(ps
, addr
, reg
, val
);
171 mutex_unlock(&ps
->smi_mutex
);
176 static int mv88e6xxx_set_addr_direct(struct dsa_switch
*ds
, u8
*addr
)
178 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
181 err
= mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_MAC_01
,
182 (addr
[0] << 8) | addr
[1]);
186 err
= mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_MAC_23
,
187 (addr
[2] << 8) | addr
[3]);
191 return mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_MAC_45
,
192 (addr
[4] << 8) | addr
[5]);
195 static int mv88e6xxx_set_addr_indirect(struct dsa_switch
*ds
, u8
*addr
)
197 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
201 for (i
= 0; i
< 6; i
++) {
204 /* Write the MAC address byte. */
205 ret
= mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_SWITCH_MAC
,
206 GLOBAL2_SWITCH_MAC_BUSY
|
211 /* Wait for the write to complete. */
212 for (j
= 0; j
< 16; j
++) {
213 ret
= mv88e6xxx_reg_read(ps
, REG_GLOBAL2
,
218 if ((ret
& GLOBAL2_SWITCH_MAC_BUSY
) == 0)
228 int mv88e6xxx_set_addr(struct dsa_switch
*ds
, u8
*addr
)
230 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
232 if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_SWITCH_MAC
))
233 return mv88e6xxx_set_addr_indirect(ds
, addr
);
235 return mv88e6xxx_set_addr_direct(ds
, addr
);
238 static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state
*ps
, int addr
,
242 return _mv88e6xxx_reg_read(ps
, addr
, regnum
);
246 static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state
*ps
, int addr
,
250 return _mv88e6xxx_reg_write(ps
, addr
, regnum
, val
);
254 static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state
*ps
)
257 unsigned long timeout
;
259 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_CONTROL
);
263 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_CONTROL
,
264 ret
& ~GLOBAL_CONTROL_PPU_ENABLE
);
268 timeout
= jiffies
+ 1 * HZ
;
269 while (time_before(jiffies
, timeout
)) {
270 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_STATUS
);
274 usleep_range(1000, 2000);
275 if ((ret
& GLOBAL_STATUS_PPU_MASK
) !=
276 GLOBAL_STATUS_PPU_POLLING
)
283 static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state
*ps
)
286 unsigned long timeout
;
288 ret
= mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_CONTROL
);
292 err
= mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_CONTROL
,
293 ret
| GLOBAL_CONTROL_PPU_ENABLE
);
297 timeout
= jiffies
+ 1 * HZ
;
298 while (time_before(jiffies
, timeout
)) {
299 ret
= mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_STATUS
);
303 usleep_range(1000, 2000);
304 if ((ret
& GLOBAL_STATUS_PPU_MASK
) ==
305 GLOBAL_STATUS_PPU_POLLING
)
312 static void mv88e6xxx_ppu_reenable_work(struct work_struct
*ugly
)
314 struct mv88e6xxx_priv_state
*ps
;
316 ps
= container_of(ugly
, struct mv88e6xxx_priv_state
, ppu_work
);
317 if (mutex_trylock(&ps
->ppu_mutex
)) {
318 if (mv88e6xxx_ppu_enable(ps
) == 0)
319 ps
->ppu_disabled
= 0;
320 mutex_unlock(&ps
->ppu_mutex
);
324 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps
)
326 struct mv88e6xxx_priv_state
*ps
= (void *)_ps
;
328 schedule_work(&ps
->ppu_work
);
331 static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state
*ps
)
335 mutex_lock(&ps
->ppu_mutex
);
337 /* If the PHY polling unit is enabled, disable it so that
338 * we can access the PHY registers. If it was already
339 * disabled, cancel the timer that is going to re-enable
342 if (!ps
->ppu_disabled
) {
343 ret
= mv88e6xxx_ppu_disable(ps
);
345 mutex_unlock(&ps
->ppu_mutex
);
348 ps
->ppu_disabled
= 1;
350 del_timer(&ps
->ppu_timer
);
357 static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state
*ps
)
359 /* Schedule a timer to re-enable the PHY polling unit. */
360 mod_timer(&ps
->ppu_timer
, jiffies
+ msecs_to_jiffies(10));
361 mutex_unlock(&ps
->ppu_mutex
);
364 void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state
*ps
)
366 mutex_init(&ps
->ppu_mutex
);
367 INIT_WORK(&ps
->ppu_work
, mv88e6xxx_ppu_reenable_work
);
368 init_timer(&ps
->ppu_timer
);
369 ps
->ppu_timer
.data
= (unsigned long)ps
;
370 ps
->ppu_timer
.function
= mv88e6xxx_ppu_reenable_timer
;
373 static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state
*ps
, int addr
,
378 ret
= mv88e6xxx_ppu_access_get(ps
);
380 ret
= _mv88e6xxx_reg_read(ps
, addr
, regnum
);
381 mv88e6xxx_ppu_access_put(ps
);
387 static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state
*ps
, int addr
,
392 ret
= mv88e6xxx_ppu_access_get(ps
);
394 ret
= _mv88e6xxx_reg_write(ps
, addr
, regnum
, val
);
395 mv88e6xxx_ppu_access_put(ps
);
/* True iff this chip belongs to the 88E6065 family (per ps->info->family). */
401 static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state
*ps
)
403 return ps
->info
->family
== MV88E6XXX_FAMILY_6065
;
/* True iff this chip belongs to the 88E6095 family (per ps->info->family). */
406 static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state
*ps
)
408 return ps
->info
->family
== MV88E6XXX_FAMILY_6095
;
/* True iff this chip belongs to the 88E6097 family (per ps->info->family). */
411 static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state
*ps
)
413 return ps
->info
->family
== MV88E6XXX_FAMILY_6097
;
/* True iff this chip belongs to the 88E6165 family (per ps->info->family). */
416 static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state
*ps
)
418 return ps
->info
->family
== MV88E6XXX_FAMILY_6165
;
/* True iff this chip belongs to the 88E6185 family (per ps->info->family). */
421 static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state
*ps
)
423 return ps
->info
->family
== MV88E6XXX_FAMILY_6185
;
/* True iff this chip belongs to the 88E6320 family (per ps->info->family). */
426 static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state
*ps
)
428 return ps
->info
->family
== MV88E6XXX_FAMILY_6320
;
/* True iff this chip belongs to the 88E6351 family (per ps->info->family). */
431 static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state
*ps
)
433 return ps
->info
->family
== MV88E6XXX_FAMILY_6351
;
/* True iff this chip belongs to the 88E6352 family (per ps->info->family). */
436 static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state
*ps
)
438 return ps
->info
->family
== MV88E6XXX_FAMILY_6352
;
/* Number of forwarding databases (FIDs) this chip model supports,
 * as recorded in the per-model info table.
 */
441 static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state
*ps
)
443 return ps
->info
->num_databases
;
/* Capability check: true for the families (6097/6165/6351/6352) that expose
 * dedicated FID registers for ATU and VTU operations.  (Fragment — the
 * return statements themselves are missing from this extraction.)
 */
446 static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state
*ps
)
448 /* Does the device have dedicated FID registers for ATU and VTU ops? */
449 if (mv88e6xxx_6097_family(ps
) || mv88e6xxx_6165_family(ps
) ||
450 mv88e6xxx_6351_family(ps
) || mv88e6xxx_6352_family(ps
))
/* Capability check: true for the families (6097/6165/6351/6352) that have an
 * STU and dedicated SID registers for VTU operations.  (Fragment — the
 * return statements themselves are missing from this extraction.)
 */
456 static bool mv88e6xxx_has_stu(struct mv88e6xxx_priv_state
*ps
)
458 /* Does the device have STU and dedicated SID registers for VTU ops? */
459 if (mv88e6xxx_6097_family(ps
) || mv88e6xxx_6165_family(ps
) ||
460 mv88e6xxx_6351_family(ps
) || mv88e6xxx_6352_family(ps
))
466 /* We expect the switch to perform auto negotiation if there is a real
467 * phy. However, in the case of a fixed link phy, we force the port
468 * settings from the fixed link settings.
470 static void mv88e6xxx_adjust_link(struct dsa_switch
*ds
, int port
,
471 struct phy_device
*phydev
)
473 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
477 if (!phy_is_pseudo_fixed_link(phydev
))
480 mutex_lock(&ps
->smi_mutex
);
482 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_PCS_CTRL
);
486 reg
= ret
& ~(PORT_PCS_CTRL_LINK_UP
|
487 PORT_PCS_CTRL_FORCE_LINK
|
488 PORT_PCS_CTRL_DUPLEX_FULL
|
489 PORT_PCS_CTRL_FORCE_DUPLEX
|
490 PORT_PCS_CTRL_UNFORCED
);
492 reg
|= PORT_PCS_CTRL_FORCE_LINK
;
494 reg
|= PORT_PCS_CTRL_LINK_UP
;
496 if (mv88e6xxx_6065_family(ps
) && phydev
->speed
> SPEED_100
)
499 switch (phydev
->speed
) {
501 reg
|= PORT_PCS_CTRL_1000
;
504 reg
|= PORT_PCS_CTRL_100
;
507 reg
|= PORT_PCS_CTRL_10
;
510 pr_info("Unknown speed");
514 reg
|= PORT_PCS_CTRL_FORCE_DUPLEX
;
515 if (phydev
->duplex
== DUPLEX_FULL
)
516 reg
|= PORT_PCS_CTRL_DUPLEX_FULL
;
518 if ((mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
)) &&
519 (port
>= ps
->info
->num_ports
- 2)) {
520 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII_RXID
)
521 reg
|= PORT_PCS_CTRL_RGMII_DELAY_RXCLK
;
522 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII_TXID
)
523 reg
|= PORT_PCS_CTRL_RGMII_DELAY_TXCLK
;
524 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII_ID
)
525 reg
|= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK
|
526 PORT_PCS_CTRL_RGMII_DELAY_TXCLK
);
528 _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_PCS_CTRL
, reg
);
531 mutex_unlock(&ps
->smi_mutex
);
534 static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state
*ps
)
539 for (i
= 0; i
< 10; i
++) {
540 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_STATS_OP
);
541 if ((ret
& GLOBAL_STATS_OP_BUSY
) == 0)
548 static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state
*ps
,
553 if (mv88e6xxx_6320_family(ps
) || mv88e6xxx_6352_family(ps
))
554 port
= (port
+ 1) << 5;
556 /* Snapshot the hardware statistics counters for this port. */
557 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_STATS_OP
,
558 GLOBAL_STATS_OP_CAPTURE_PORT
|
559 GLOBAL_STATS_OP_HIST_RX_TX
| port
);
563 /* Wait for the snapshotting to complete. */
564 ret
= _mv88e6xxx_stats_wait(ps
);
571 static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state
*ps
,
579 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_STATS_OP
,
580 GLOBAL_STATS_OP_READ_CAPTURED
|
581 GLOBAL_STATS_OP_HIST_RX_TX
| stat
);
585 ret
= _mv88e6xxx_stats_wait(ps
);
589 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_STATS_COUNTER_32
);
595 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_STATS_COUNTER_01
);
602 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats
[] = {
603 { "in_good_octets", 8, 0x00, BANK0
, },
604 { "in_bad_octets", 4, 0x02, BANK0
, },
605 { "in_unicast", 4, 0x04, BANK0
, },
606 { "in_broadcasts", 4, 0x06, BANK0
, },
607 { "in_multicasts", 4, 0x07, BANK0
, },
608 { "in_pause", 4, 0x16, BANK0
, },
609 { "in_undersize", 4, 0x18, BANK0
, },
610 { "in_fragments", 4, 0x19, BANK0
, },
611 { "in_oversize", 4, 0x1a, BANK0
, },
612 { "in_jabber", 4, 0x1b, BANK0
, },
613 { "in_rx_error", 4, 0x1c, BANK0
, },
614 { "in_fcs_error", 4, 0x1d, BANK0
, },
615 { "out_octets", 8, 0x0e, BANK0
, },
616 { "out_unicast", 4, 0x10, BANK0
, },
617 { "out_broadcasts", 4, 0x13, BANK0
, },
618 { "out_multicasts", 4, 0x12, BANK0
, },
619 { "out_pause", 4, 0x15, BANK0
, },
620 { "excessive", 4, 0x11, BANK0
, },
621 { "collisions", 4, 0x1e, BANK0
, },
622 { "deferred", 4, 0x05, BANK0
, },
623 { "single", 4, 0x14, BANK0
, },
624 { "multiple", 4, 0x17, BANK0
, },
625 { "out_fcs_error", 4, 0x03, BANK0
, },
626 { "late", 4, 0x1f, BANK0
, },
627 { "hist_64bytes", 4, 0x08, BANK0
, },
628 { "hist_65_127bytes", 4, 0x09, BANK0
, },
629 { "hist_128_255bytes", 4, 0x0a, BANK0
, },
630 { "hist_256_511bytes", 4, 0x0b, BANK0
, },
631 { "hist_512_1023bytes", 4, 0x0c, BANK0
, },
632 { "hist_1024_max_bytes", 4, 0x0d, BANK0
, },
633 { "sw_in_discards", 4, 0x10, PORT
, },
634 { "sw_in_filtered", 2, 0x12, PORT
, },
635 { "sw_out_filtered", 2, 0x13, PORT
, },
636 { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
637 { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
638 { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
639 { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
640 { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
641 { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
642 { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
643 { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
644 { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
645 { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
646 { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
647 { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
648 { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
649 { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
650 { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
651 { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
652 { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
653 { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
654 { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
655 { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
656 { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
657 { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
658 { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
659 { "out_octets_a", 4, 0x1a | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
660 { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
661 { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
664 static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state
*ps
,
665 struct mv88e6xxx_hw_stat
*stat
)
667 switch (stat
->type
) {
671 return mv88e6xxx_6320_family(ps
);
673 return mv88e6xxx_6095_family(ps
) ||
674 mv88e6xxx_6185_family(ps
) ||
675 mv88e6xxx_6097_family(ps
) ||
676 mv88e6xxx_6165_family(ps
) ||
677 mv88e6xxx_6351_family(ps
) ||
678 mv88e6xxx_6352_family(ps
);
683 static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state
*ps
,
684 struct mv88e6xxx_hw_stat
*s
,
694 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), s
->reg
);
699 if (s
->sizeof_stat
== 4) {
700 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
),
709 _mv88e6xxx_stats_read(ps
, s
->reg
, &low
);
710 if (s
->sizeof_stat
== 8)
711 _mv88e6xxx_stats_read(ps
, s
->reg
+ 1, &high
);
713 value
= (((u64
)high
) << 16) | low
;
717 static void mv88e6xxx_get_strings(struct dsa_switch
*ds
, int port
,
720 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
721 struct mv88e6xxx_hw_stat
*stat
;
724 for (i
= 0, j
= 0; i
< ARRAY_SIZE(mv88e6xxx_hw_stats
); i
++) {
725 stat
= &mv88e6xxx_hw_stats
[i
];
726 if (mv88e6xxx_has_stat(ps
, stat
)) {
727 memcpy(data
+ j
* ETH_GSTRING_LEN
, stat
->string
,
734 static int mv88e6xxx_get_sset_count(struct dsa_switch
*ds
)
736 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
737 struct mv88e6xxx_hw_stat
*stat
;
740 for (i
= 0, j
= 0; i
< ARRAY_SIZE(mv88e6xxx_hw_stats
); i
++) {
741 stat
= &mv88e6xxx_hw_stats
[i
];
742 if (mv88e6xxx_has_stat(ps
, stat
))
748 static void mv88e6xxx_get_ethtool_stats(struct dsa_switch
*ds
, int port
,
751 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
752 struct mv88e6xxx_hw_stat
*stat
;
756 mutex_lock(&ps
->smi_mutex
);
758 ret
= _mv88e6xxx_stats_snapshot(ps
, port
);
760 mutex_unlock(&ps
->smi_mutex
);
763 for (i
= 0, j
= 0; i
< ARRAY_SIZE(mv88e6xxx_hw_stats
); i
++) {
764 stat
= &mv88e6xxx_hw_stats
[i
];
765 if (mv88e6xxx_has_stat(ps
, stat
)) {
766 data
[j
] = _mv88e6xxx_get_ethtool_stat(ps
, stat
, port
);
771 mutex_unlock(&ps
->smi_mutex
);
774 static int mv88e6xxx_get_regs_len(struct dsa_switch
*ds
, int port
)
776 return 32 * sizeof(u16
);
779 static void mv88e6xxx_get_regs(struct dsa_switch
*ds
, int port
,
780 struct ethtool_regs
*regs
, void *_p
)
782 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
788 memset(p
, 0xff, 32 * sizeof(u16
));
790 mutex_lock(&ps
->smi_mutex
);
792 for (i
= 0; i
< 32; i
++) {
795 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), i
);
800 mutex_unlock(&ps
->smi_mutex
);
803 static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state
*ps
, int reg
, int offset
,
806 unsigned long timeout
= jiffies
+ HZ
/ 10;
808 while (time_before(jiffies
, timeout
)) {
811 ret
= _mv88e6xxx_reg_read(ps
, reg
, offset
);
817 usleep_range(1000, 2000);
822 static int mv88e6xxx_wait(struct mv88e6xxx_priv_state
*ps
, int reg
,
823 int offset
, u16 mask
)
827 mutex_lock(&ps
->smi_mutex
);
828 ret
= _mv88e6xxx_wait(ps
, reg
, offset
, mask
);
829 mutex_unlock(&ps
->smi_mutex
);
834 static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state
*ps
)
836 return _mv88e6xxx_wait(ps
, REG_GLOBAL2
, GLOBAL2_SMI_OP
,
837 GLOBAL2_SMI_OP_BUSY
);
840 static int mv88e6xxx_eeprom_load_wait(struct dsa_switch
*ds
)
842 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
844 return mv88e6xxx_wait(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
,
845 GLOBAL2_EEPROM_OP_LOAD
);
848 static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch
*ds
)
850 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
852 return mv88e6xxx_wait(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
,
853 GLOBAL2_EEPROM_OP_BUSY
);
856 static int mv88e6xxx_read_eeprom_word(struct dsa_switch
*ds
, int addr
)
858 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
861 mutex_lock(&ps
->eeprom_mutex
);
863 ret
= mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
,
864 GLOBAL2_EEPROM_OP_READ
|
865 (addr
& GLOBAL2_EEPROM_OP_ADDR_MASK
));
869 ret
= mv88e6xxx_eeprom_busy_wait(ds
);
873 ret
= mv88e6xxx_reg_read(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_DATA
);
875 mutex_unlock(&ps
->eeprom_mutex
);
879 static int mv88e6xxx_get_eeprom(struct dsa_switch
*ds
,
880 struct ethtool_eeprom
*eeprom
, u8
*data
)
882 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
887 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_EEPROM
))
890 offset
= eeprom
->offset
;
894 eeprom
->magic
= 0xc3ec4951;
896 ret
= mv88e6xxx_eeprom_load_wait(ds
);
903 word
= mv88e6xxx_read_eeprom_word(ds
, offset
>> 1);
907 *data
++ = (word
>> 8) & 0xff;
917 word
= mv88e6xxx_read_eeprom_word(ds
, offset
>> 1);
921 *data
++ = word
& 0xff;
922 *data
++ = (word
>> 8) & 0xff;
932 word
= mv88e6xxx_read_eeprom_word(ds
, offset
>> 1);
936 *data
++ = word
& 0xff;
946 static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch
*ds
)
948 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
951 ret
= mv88e6xxx_reg_read(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
);
955 if (!(ret
& GLOBAL2_EEPROM_OP_WRITE_EN
))
961 static int mv88e6xxx_write_eeprom_word(struct dsa_switch
*ds
, int addr
,
964 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
967 mutex_lock(&ps
->eeprom_mutex
);
969 ret
= mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_DATA
, data
);
973 ret
= mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
,
974 GLOBAL2_EEPROM_OP_WRITE
|
975 (addr
& GLOBAL2_EEPROM_OP_ADDR_MASK
));
979 ret
= mv88e6xxx_eeprom_busy_wait(ds
);
981 mutex_unlock(&ps
->eeprom_mutex
);
985 static int mv88e6xxx_set_eeprom(struct dsa_switch
*ds
,
986 struct ethtool_eeprom
*eeprom
, u8
*data
)
988 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
993 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_EEPROM
))
996 if (eeprom
->magic
!= 0xc3ec4951)
999 ret
= mv88e6xxx_eeprom_is_readonly(ds
);
1003 offset
= eeprom
->offset
;
1007 ret
= mv88e6xxx_eeprom_load_wait(ds
);
1014 word
= mv88e6xxx_read_eeprom_word(ds
, offset
>> 1);
1018 word
= (*data
++ << 8) | (word
& 0xff);
1020 ret
= mv88e6xxx_write_eeprom_word(ds
, offset
>> 1, word
);
1033 word
|= *data
++ << 8;
1035 ret
= mv88e6xxx_write_eeprom_word(ds
, offset
>> 1, word
);
1047 word
= mv88e6xxx_read_eeprom_word(ds
, offset
>> 1);
1051 word
= (word
& 0xff00) | *data
++;
1053 ret
= mv88e6xxx_write_eeprom_word(ds
, offset
>> 1, word
);
1065 static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state
*ps
)
1067 return _mv88e6xxx_wait(ps
, REG_GLOBAL
, GLOBAL_ATU_OP
,
1068 GLOBAL_ATU_OP_BUSY
);
1071 static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state
*ps
,
1072 int addr
, int regnum
)
1076 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_SMI_OP
,
1077 GLOBAL2_SMI_OP_22_READ
| (addr
<< 5) |
1082 ret
= _mv88e6xxx_phy_wait(ps
);
1086 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL2
, GLOBAL2_SMI_DATA
);
1091 static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state
*ps
,
1092 int addr
, int regnum
, u16 val
)
1096 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_SMI_DATA
, val
);
1100 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_SMI_OP
,
1101 GLOBAL2_SMI_OP_22_WRITE
| (addr
<< 5) |
1104 return _mv88e6xxx_phy_wait(ps
);
1107 static int mv88e6xxx_get_eee(struct dsa_switch
*ds
, int port
,
1108 struct ethtool_eee
*e
)
1110 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1113 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_EEE
))
1116 mutex_lock(&ps
->smi_mutex
);
1118 reg
= _mv88e6xxx_phy_read_indirect(ps
, port
, 16);
1122 e
->eee_enabled
= !!(reg
& 0x0200);
1123 e
->tx_lpi_enabled
= !!(reg
& 0x0100);
1125 reg
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_STATUS
);
1129 e
->eee_active
= !!(reg
& PORT_STATUS_EEE
);
1133 mutex_unlock(&ps
->smi_mutex
);
1137 static int mv88e6xxx_set_eee(struct dsa_switch
*ds
, int port
,
1138 struct phy_device
*phydev
, struct ethtool_eee
*e
)
1140 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1144 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_EEE
))
1147 mutex_lock(&ps
->smi_mutex
);
1149 ret
= _mv88e6xxx_phy_read_indirect(ps
, port
, 16);
1153 reg
= ret
& ~0x0300;
1156 if (e
->tx_lpi_enabled
)
1159 ret
= _mv88e6xxx_phy_write_indirect(ps
, port
, 16, reg
);
1161 mutex_unlock(&ps
->smi_mutex
);
1166 static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state
*ps
, u16 fid
, u16 cmd
)
1170 if (mv88e6xxx_has_fid_reg(ps
)) {
1171 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_ATU_FID
, fid
);
1174 } else if (mv88e6xxx_num_databases(ps
) == 256) {
1175 /* ATU DBNum[7:4] are located in ATU Control 15:12 */
1176 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_ATU_CONTROL
);
1180 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_ATU_CONTROL
,
1182 ((fid
<< 8) & 0xf000));
1186 /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
1190 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_ATU_OP
, cmd
);
1194 return _mv88e6xxx_atu_wait(ps
);
1197 static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state
*ps
,
1198 struct mv88e6xxx_atu_entry
*entry
)
1200 u16 data
= entry
->state
& GLOBAL_ATU_DATA_STATE_MASK
;
1202 if (entry
->state
!= GLOBAL_ATU_DATA_STATE_UNUSED
) {
1203 unsigned int mask
, shift
;
1206 data
|= GLOBAL_ATU_DATA_TRUNK
;
1207 mask
= GLOBAL_ATU_DATA_TRUNK_ID_MASK
;
1208 shift
= GLOBAL_ATU_DATA_TRUNK_ID_SHIFT
;
1210 mask
= GLOBAL_ATU_DATA_PORT_VECTOR_MASK
;
1211 shift
= GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT
;
1214 data
|= (entry
->portv_trunkid
<< shift
) & mask
;
1217 return _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_ATU_DATA
, data
);
1220 static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state
*ps
,
1221 struct mv88e6xxx_atu_entry
*entry
,
1227 err
= _mv88e6xxx_atu_wait(ps
);
1231 err
= _mv88e6xxx_atu_data_write(ps
, entry
);
1236 op
= static_too
? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB
:
1237 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB
;
1239 op
= static_too
? GLOBAL_ATU_OP_FLUSH_MOVE_ALL
:
1240 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC
;
1243 return _mv88e6xxx_atu_cmd(ps
, entry
->fid
, op
);
1246 static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state
*ps
,
1247 u16 fid
, bool static_too
)
1249 struct mv88e6xxx_atu_entry entry
= {
1251 .state
= 0, /* EntryState bits must be 0 */
1254 return _mv88e6xxx_atu_flush_move(ps
, &entry
, static_too
);
1257 static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state
*ps
, u16 fid
,
1258 int from_port
, int to_port
, bool static_too
)
1260 struct mv88e6xxx_atu_entry entry
= {
1265 /* EntryState bits must be 0xF */
1266 entry
.state
= GLOBAL_ATU_DATA_STATE_MASK
;
1268 /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1269 entry
.portv_trunkid
= (to_port
& 0x0f) << 4;
1270 entry
.portv_trunkid
|= from_port
& 0x0f;
1272 return _mv88e6xxx_atu_flush_move(ps
, &entry
, static_too
);
1275 static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state
*ps
, u16 fid
,
1276 int port
, bool static_too
)
1278 /* Destination port 0xF means remove the entries */
1279 return _mv88e6xxx_atu_move(ps
, fid
, port
, 0x0f, static_too
);
/* Human-readable names for the PORT_CONTROL_STATE_* values, indexed by the
 * state constant; used by _mv88e6xxx_port_state() debug logging.
 * (Fragment — the closing brace of the initializer is missing from this
 * extraction.)
 */
1282 static const char * const mv88e6xxx_port_state_names
[] = {
1283 [PORT_CONTROL_STATE_DISABLED
] = "Disabled",
1284 [PORT_CONTROL_STATE_BLOCKING
] = "Blocking/Listening",
1285 [PORT_CONTROL_STATE_LEARNING
] = "Learning",
1286 [PORT_CONTROL_STATE_FORWARDING
] = "Forwarding",
1289 static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state
*ps
, int port
,
1292 struct dsa_switch
*ds
= ps
->ds
;
1296 reg
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_CONTROL
);
1300 oldstate
= reg
& PORT_CONTROL_STATE_MASK
;
1302 if (oldstate
!= state
) {
1303 /* Flush forwarding database if we're moving a port
1304 * from Learning or Forwarding state to Disabled or
1305 * Blocking or Listening state.
1307 if ((oldstate
== PORT_CONTROL_STATE_LEARNING
||
1308 oldstate
== PORT_CONTROL_STATE_FORWARDING
)
1309 && (state
== PORT_CONTROL_STATE_DISABLED
||
1310 state
== PORT_CONTROL_STATE_BLOCKING
)) {
1311 ret
= _mv88e6xxx_atu_remove(ps
, 0, port
, false);
1316 reg
= (reg
& ~PORT_CONTROL_STATE_MASK
) | state
;
1317 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_CONTROL
,
1322 netdev_dbg(ds
->ports
[port
], "PortState %s (was %s)\n",
1323 mv88e6xxx_port_state_names
[state
],
1324 mv88e6xxx_port_state_names
[oldstate
]);
1330 static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state
*ps
,
1333 struct net_device
*bridge
= ps
->ports
[port
].bridge_dev
;
1334 const u16 mask
= (1 << ps
->info
->num_ports
) - 1;
1335 struct dsa_switch
*ds
= ps
->ds
;
1336 u16 output_ports
= 0;
1340 /* allow CPU port or DSA link(s) to send frames to every port */
1341 if (dsa_is_cpu_port(ds
, port
) || dsa_is_dsa_port(ds
, port
)) {
1342 output_ports
= mask
;
1344 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
1345 /* allow sending frames to every group member */
1346 if (bridge
&& ps
->ports
[i
].bridge_dev
== bridge
)
1347 output_ports
|= BIT(i
);
1349 /* allow sending frames to CPU port and DSA link(s) */
1350 if (dsa_is_cpu_port(ds
, i
) || dsa_is_dsa_port(ds
, i
))
1351 output_ports
|= BIT(i
);
1355 /* prevent frames from going back out of the port they came in on */
1356 output_ports
&= ~BIT(port
);
1358 reg
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_BASE_VLAN
);
1363 reg
|= output_ports
& mask
;
1365 return _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_BASE_VLAN
, reg
);
1368 static void mv88e6xxx_port_stp_state_set(struct dsa_switch
*ds
, int port
,
1371 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1374 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_PORTSTATE
))
1378 case BR_STATE_DISABLED
:
1379 stp_state
= PORT_CONTROL_STATE_DISABLED
;
1381 case BR_STATE_BLOCKING
:
1382 case BR_STATE_LISTENING
:
1383 stp_state
= PORT_CONTROL_STATE_BLOCKING
;
1385 case BR_STATE_LEARNING
:
1386 stp_state
= PORT_CONTROL_STATE_LEARNING
;
1388 case BR_STATE_FORWARDING
:
1390 stp_state
= PORT_CONTROL_STATE_FORWARDING
;
1394 /* mv88e6xxx_port_stp_state_set may be called with softirqs disabled,
1395 * so we can not update the port state directly but need to schedule it.
1397 ps
->ports
[port
].state
= stp_state
;
1398 set_bit(port
, ps
->port_state_update_mask
);
1399 schedule_work(&ps
->bridge_work
);
1402 static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state
*ps
, int port
,
1405 struct dsa_switch
*ds
= ps
->ds
;
1409 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_DEFAULT_VLAN
);
1413 pvid
= ret
& PORT_DEFAULT_VLAN_MASK
;
1416 ret
&= ~PORT_DEFAULT_VLAN_MASK
;
1417 ret
|= *new & PORT_DEFAULT_VLAN_MASK
;
1419 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
1420 PORT_DEFAULT_VLAN
, ret
);
1424 netdev_dbg(ds
->ports
[port
], "DefaultVID %d (was %d)\n", *new,
1434 static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state
*ps
,
1435 int port
, u16
*pvid
)
1437 return _mv88e6xxx_port_pvid(ps
, port
, NULL
, pvid
);
1440 static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state
*ps
,
1443 return _mv88e6xxx_port_pvid(ps
, port
, &pvid
, NULL
);
1446 static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state
*ps
)
1448 return _mv88e6xxx_wait(ps
, REG_GLOBAL
, GLOBAL_VTU_OP
,
1449 GLOBAL_VTU_OP_BUSY
);
1452 static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state
*ps
, u16 op
)
1456 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_OP
, op
);
1460 return _mv88e6xxx_vtu_wait(ps
);
1463 static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state
*ps
)
1467 ret
= _mv88e6xxx_vtu_wait(ps
);
1471 return _mv88e6xxx_vtu_cmd(ps
, GLOBAL_VTU_OP_FLUSH_ALL
);
1474 static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state
*ps
,
1475 struct mv88e6xxx_vtu_stu_entry
*entry
,
1476 unsigned int nibble_offset
)
1482 for (i
= 0; i
< 3; ++i
) {
1483 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
,
1484 GLOBAL_VTU_DATA_0_3
+ i
);
1491 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
1492 unsigned int shift
= (i
% 4) * 4 + nibble_offset
;
1493 u16 reg
= regs
[i
/ 4];
1495 entry
->data
[i
] = (reg
>> shift
) & GLOBAL_VTU_STU_DATA_MASK
;
1501 static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state
*ps
,
1502 struct mv88e6xxx_vtu_stu_entry
*entry
,
1503 unsigned int nibble_offset
)
1505 u16 regs
[3] = { 0 };
1509 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
1510 unsigned int shift
= (i
% 4) * 4 + nibble_offset
;
1511 u8 data
= entry
->data
[i
];
1513 regs
[i
/ 4] |= (data
& GLOBAL_VTU_STU_DATA_MASK
) << shift
;
1516 for (i
= 0; i
< 3; ++i
) {
1517 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
,
1518 GLOBAL_VTU_DATA_0_3
+ i
, regs
[i
]);
1526 static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state
*ps
, u16 vid
)
1528 return _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_VID
,
1529 vid
& GLOBAL_VTU_VID_MASK
);
1532 static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state
*ps
,
1533 struct mv88e6xxx_vtu_stu_entry
*entry
)
1535 struct mv88e6xxx_vtu_stu_entry next
= { 0 };
1538 ret
= _mv88e6xxx_vtu_wait(ps
);
1542 ret
= _mv88e6xxx_vtu_cmd(ps
, GLOBAL_VTU_OP_VTU_GET_NEXT
);
1546 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_VTU_VID
);
1550 next
.vid
= ret
& GLOBAL_VTU_VID_MASK
;
1551 next
.valid
= !!(ret
& GLOBAL_VTU_VID_VALID
);
1554 ret
= _mv88e6xxx_vtu_stu_data_read(ps
, &next
, 0);
1558 if (mv88e6xxx_has_fid_reg(ps
)) {
1559 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
,
1564 next
.fid
= ret
& GLOBAL_VTU_FID_MASK
;
1565 } else if (mv88e6xxx_num_databases(ps
) == 256) {
1566 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1567 * VTU DBNum[3:0] are located in VTU Operation 3:0
1569 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
,
1574 next
.fid
= (ret
& 0xf00) >> 4;
1575 next
.fid
|= ret
& 0xf;
1578 if (mv88e6xxx_has_stu(ps
)) {
1579 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
,
1584 next
.sid
= ret
& GLOBAL_VTU_SID_MASK
;
1592 static int mv88e6xxx_port_vlan_dump(struct dsa_switch
*ds
, int port
,
1593 struct switchdev_obj_port_vlan
*vlan
,
1594 int (*cb
)(struct switchdev_obj
*obj
))
1596 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1597 struct mv88e6xxx_vtu_stu_entry next
;
1601 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_VTU
))
1604 mutex_lock(&ps
->smi_mutex
);
1606 err
= _mv88e6xxx_port_pvid_get(ps
, port
, &pvid
);
1610 err
= _mv88e6xxx_vtu_vid_write(ps
, GLOBAL_VTU_VID_MASK
);
1615 err
= _mv88e6xxx_vtu_getnext(ps
, &next
);
1622 if (next
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
)
1625 /* reinit and dump this VLAN obj */
1626 vlan
->vid_begin
= vlan
->vid_end
= next
.vid
;
1629 if (next
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED
)
1630 vlan
->flags
|= BRIDGE_VLAN_INFO_UNTAGGED
;
1632 if (next
.vid
== pvid
)
1633 vlan
->flags
|= BRIDGE_VLAN_INFO_PVID
;
1635 err
= cb(&vlan
->obj
);
1638 } while (next
.vid
< GLOBAL_VTU_VID_MASK
);
1641 mutex_unlock(&ps
->smi_mutex
);
1646 static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state
*ps
,
1647 struct mv88e6xxx_vtu_stu_entry
*entry
)
1649 u16 op
= GLOBAL_VTU_OP_VTU_LOAD_PURGE
;
1653 ret
= _mv88e6xxx_vtu_wait(ps
);
1660 /* Write port member tags */
1661 ret
= _mv88e6xxx_vtu_stu_data_write(ps
, entry
, 0);
1665 if (mv88e6xxx_has_stu(ps
)) {
1666 reg
= entry
->sid
& GLOBAL_VTU_SID_MASK
;
1667 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_SID
, reg
);
1672 if (mv88e6xxx_has_fid_reg(ps
)) {
1673 reg
= entry
->fid
& GLOBAL_VTU_FID_MASK
;
1674 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_FID
, reg
);
1677 } else if (mv88e6xxx_num_databases(ps
) == 256) {
1678 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1679 * VTU DBNum[3:0] are located in VTU Operation 3:0
1681 op
|= (entry
->fid
& 0xf0) << 8;
1682 op
|= entry
->fid
& 0xf;
1685 reg
= GLOBAL_VTU_VID_VALID
;
1687 reg
|= entry
->vid
& GLOBAL_VTU_VID_MASK
;
1688 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_VID
, reg
);
1692 return _mv88e6xxx_vtu_cmd(ps
, op
);
1695 static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state
*ps
, u8 sid
,
1696 struct mv88e6xxx_vtu_stu_entry
*entry
)
1698 struct mv88e6xxx_vtu_stu_entry next
= { 0 };
1701 ret
= _mv88e6xxx_vtu_wait(ps
);
1705 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_SID
,
1706 sid
& GLOBAL_VTU_SID_MASK
);
1710 ret
= _mv88e6xxx_vtu_cmd(ps
, GLOBAL_VTU_OP_STU_GET_NEXT
);
1714 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_VTU_SID
);
1718 next
.sid
= ret
& GLOBAL_VTU_SID_MASK
;
1720 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_VTU_VID
);
1724 next
.valid
= !!(ret
& GLOBAL_VTU_VID_VALID
);
1727 ret
= _mv88e6xxx_vtu_stu_data_read(ps
, &next
, 2);
1736 static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state
*ps
,
1737 struct mv88e6xxx_vtu_stu_entry
*entry
)
1742 ret
= _mv88e6xxx_vtu_wait(ps
);
1749 /* Write port states */
1750 ret
= _mv88e6xxx_vtu_stu_data_write(ps
, entry
, 2);
1754 reg
= GLOBAL_VTU_VID_VALID
;
1756 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_VID
, reg
);
1760 reg
= entry
->sid
& GLOBAL_VTU_SID_MASK
;
1761 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_SID
, reg
);
1765 return _mv88e6xxx_vtu_cmd(ps
, GLOBAL_VTU_OP_STU_LOAD_PURGE
);
1768 static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state
*ps
, int port
,
1771 struct dsa_switch
*ds
= ps
->ds
;
1776 if (mv88e6xxx_num_databases(ps
) == 4096)
1778 else if (mv88e6xxx_num_databases(ps
) == 256)
1783 /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
1784 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_BASE_VLAN
);
1788 fid
= (ret
& PORT_BASE_VLAN_FID_3_0_MASK
) >> 12;
1791 ret
&= ~PORT_BASE_VLAN_FID_3_0_MASK
;
1792 ret
|= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK
;
1794 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_BASE_VLAN
,
1800 /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
1801 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_CONTROL_1
);
1805 fid
|= (ret
& upper_mask
) << 4;
1809 ret
|= (*new >> 4) & upper_mask
;
1811 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_CONTROL_1
,
1816 netdev_dbg(ds
->ports
[port
], "FID %d (was %d)\n", *new, fid
);
1825 static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state
*ps
,
1828 return _mv88e6xxx_port_fid(ps
, port
, NULL
, fid
);
1831 static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state
*ps
,
1834 return _mv88e6xxx_port_fid(ps
, port
, &fid
, NULL
);
1837 static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state
*ps
, u16
*fid
)
1839 DECLARE_BITMAP(fid_bitmap
, MV88E6XXX_N_FID
);
1840 struct mv88e6xxx_vtu_stu_entry vlan
;
1843 bitmap_zero(fid_bitmap
, MV88E6XXX_N_FID
);
1845 /* Set every FID bit used by the (un)bridged ports */
1846 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
1847 err
= _mv88e6xxx_port_fid_get(ps
, i
, fid
);
1851 set_bit(*fid
, fid_bitmap
);
1854 /* Set every FID bit used by the VLAN entries */
1855 err
= _mv88e6xxx_vtu_vid_write(ps
, GLOBAL_VTU_VID_MASK
);
1860 err
= _mv88e6xxx_vtu_getnext(ps
, &vlan
);
1867 set_bit(vlan
.fid
, fid_bitmap
);
1868 } while (vlan
.vid
< GLOBAL_VTU_VID_MASK
);
1870 /* The reset value 0x000 is used to indicate that multiple address
1871 * databases are not needed. Return the next positive available.
1873 *fid
= find_next_zero_bit(fid_bitmap
, MV88E6XXX_N_FID
, 1);
1874 if (unlikely(*fid
>= mv88e6xxx_num_databases(ps
)))
1877 /* Clear the database */
1878 return _mv88e6xxx_atu_flush(ps
, *fid
, true);
1881 static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state
*ps
, u16 vid
,
1882 struct mv88e6xxx_vtu_stu_entry
*entry
)
1884 struct dsa_switch
*ds
= ps
->ds
;
1885 struct mv88e6xxx_vtu_stu_entry vlan
= {
1891 err
= _mv88e6xxx_fid_new(ps
, &vlan
.fid
);
1895 /* exclude all ports except the CPU and DSA ports */
1896 for (i
= 0; i
< ps
->info
->num_ports
; ++i
)
1897 vlan
.data
[i
] = dsa_is_cpu_port(ds
, i
) || dsa_is_dsa_port(ds
, i
)
1898 ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
1899 : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
;
1901 if (mv88e6xxx_6097_family(ps
) || mv88e6xxx_6165_family(ps
) ||
1902 mv88e6xxx_6351_family(ps
) || mv88e6xxx_6352_family(ps
)) {
1903 struct mv88e6xxx_vtu_stu_entry vstp
;
1905 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1906 * implemented, only one STU entry is needed to cover all VTU
1907 * entries. Thus, validate the SID 0.
1910 err
= _mv88e6xxx_stu_getnext(ps
, GLOBAL_VTU_SID_MASK
, &vstp
);
1914 if (vstp
.sid
!= vlan
.sid
|| !vstp
.valid
) {
1915 memset(&vstp
, 0, sizeof(vstp
));
1917 vstp
.sid
= vlan
.sid
;
1919 err
= _mv88e6xxx_stu_loadpurge(ps
, &vstp
);
1929 static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state
*ps
, u16 vid
,
1930 struct mv88e6xxx_vtu_stu_entry
*entry
, bool creat
)
1937 err
= _mv88e6xxx_vtu_vid_write(ps
, vid
- 1);
1941 err
= _mv88e6xxx_vtu_getnext(ps
, entry
);
1945 if (entry
->vid
!= vid
|| !entry
->valid
) {
1948 /* -ENOENT would've been more appropriate, but switchdev expects
1949 * -EOPNOTSUPP to inform bridge about an eventual software VLAN.
1952 err
= _mv88e6xxx_vtu_new(ps
, vid
, entry
);
1958 static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch
*ds
, int port
,
1959 u16 vid_begin
, u16 vid_end
)
1961 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1962 struct mv88e6xxx_vtu_stu_entry vlan
;
1968 mutex_lock(&ps
->smi_mutex
);
1970 err
= _mv88e6xxx_vtu_vid_write(ps
, vid_begin
- 1);
1975 err
= _mv88e6xxx_vtu_getnext(ps
, &vlan
);
1982 if (vlan
.vid
> vid_end
)
1985 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
1986 if (dsa_is_dsa_port(ds
, i
) || dsa_is_cpu_port(ds
, i
))
1990 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
)
1993 if (ps
->ports
[i
].bridge_dev
==
1994 ps
->ports
[port
].bridge_dev
)
1995 break; /* same bridge, check next VLAN */
1997 netdev_warn(ds
->ports
[port
],
1998 "hardware VLAN %d already used by %s\n",
2000 netdev_name(ps
->ports
[i
].bridge_dev
));
2004 } while (vlan
.vid
< vid_end
);
2007 mutex_unlock(&ps
->smi_mutex
);
2012 static const char * const mv88e6xxx_port_8021q_mode_names
[] = {
2013 [PORT_CONTROL_2_8021Q_DISABLED
] = "Disabled",
2014 [PORT_CONTROL_2_8021Q_FALLBACK
] = "Fallback",
2015 [PORT_CONTROL_2_8021Q_CHECK
] = "Check",
2016 [PORT_CONTROL_2_8021Q_SECURE
] = "Secure",
2019 static int mv88e6xxx_port_vlan_filtering(struct dsa_switch
*ds
, int port
,
2020 bool vlan_filtering
)
2022 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2023 u16 old
, new = vlan_filtering
? PORT_CONTROL_2_8021Q_SECURE
:
2024 PORT_CONTROL_2_8021Q_DISABLED
;
2027 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_VTU
))
2030 mutex_lock(&ps
->smi_mutex
);
2032 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_CONTROL_2
);
2036 old
= ret
& PORT_CONTROL_2_8021Q_MASK
;
2039 ret
&= ~PORT_CONTROL_2_8021Q_MASK
;
2040 ret
|= new & PORT_CONTROL_2_8021Q_MASK
;
2042 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_CONTROL_2
,
2047 netdev_dbg(ds
->ports
[port
], "802.1Q Mode %s (was %s)\n",
2048 mv88e6xxx_port_8021q_mode_names
[new],
2049 mv88e6xxx_port_8021q_mode_names
[old
]);
2054 mutex_unlock(&ps
->smi_mutex
);
2059 static int mv88e6xxx_port_vlan_prepare(struct dsa_switch
*ds
, int port
,
2060 const struct switchdev_obj_port_vlan
*vlan
,
2061 struct switchdev_trans
*trans
)
2063 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2066 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_VTU
))
2069 /* If the requested port doesn't belong to the same bridge as the VLAN
2070 * members, do not support it (yet) and fallback to software VLAN.
2072 err
= mv88e6xxx_port_check_hw_vlan(ds
, port
, vlan
->vid_begin
,
2077 /* We don't need any dynamic resource from the kernel (yet),
2078 * so skip the prepare phase.
2083 static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state
*ps
, int port
,
2084 u16 vid
, bool untagged
)
2086 struct mv88e6xxx_vtu_stu_entry vlan
;
2089 err
= _mv88e6xxx_vtu_get(ps
, vid
, &vlan
, true);
2093 vlan
.data
[port
] = untagged
?
2094 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED
:
2095 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED
;
2097 return _mv88e6xxx_vtu_loadpurge(ps
, &vlan
);
2100 static void mv88e6xxx_port_vlan_add(struct dsa_switch
*ds
, int port
,
2101 const struct switchdev_obj_port_vlan
*vlan
,
2102 struct switchdev_trans
*trans
)
2104 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2105 bool untagged
= vlan
->flags
& BRIDGE_VLAN_INFO_UNTAGGED
;
2106 bool pvid
= vlan
->flags
& BRIDGE_VLAN_INFO_PVID
;
2109 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_VTU
))
2112 mutex_lock(&ps
->smi_mutex
);
2114 for (vid
= vlan
->vid_begin
; vid
<= vlan
->vid_end
; ++vid
)
2115 if (_mv88e6xxx_port_vlan_add(ps
, port
, vid
, untagged
))
2116 netdev_err(ds
->ports
[port
], "failed to add VLAN %d%c\n",
2117 vid
, untagged
? 'u' : 't');
2119 if (pvid
&& _mv88e6xxx_port_pvid_set(ps
, port
, vlan
->vid_end
))
2120 netdev_err(ds
->ports
[port
], "failed to set PVID %d\n",
2123 mutex_unlock(&ps
->smi_mutex
);
2126 static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state
*ps
,
2129 struct dsa_switch
*ds
= ps
->ds
;
2130 struct mv88e6xxx_vtu_stu_entry vlan
;
2133 err
= _mv88e6xxx_vtu_get(ps
, vid
, &vlan
, false);
2137 /* Tell switchdev if this VLAN is handled in software */
2138 if (vlan
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
)
2141 vlan
.data
[port
] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
;
2143 /* keep the VLAN unless all ports are excluded */
2145 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
2146 if (dsa_is_cpu_port(ds
, i
) || dsa_is_dsa_port(ds
, i
))
2149 if (vlan
.data
[i
] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
) {
2155 err
= _mv88e6xxx_vtu_loadpurge(ps
, &vlan
);
2159 return _mv88e6xxx_atu_remove(ps
, vlan
.fid
, port
, false);
2162 static int mv88e6xxx_port_vlan_del(struct dsa_switch
*ds
, int port
,
2163 const struct switchdev_obj_port_vlan
*vlan
)
2165 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2169 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_VTU
))
2172 mutex_lock(&ps
->smi_mutex
);
2174 err
= _mv88e6xxx_port_pvid_get(ps
, port
, &pvid
);
2178 for (vid
= vlan
->vid_begin
; vid
<= vlan
->vid_end
; ++vid
) {
2179 err
= _mv88e6xxx_port_vlan_del(ps
, port
, vid
);
2184 err
= _mv88e6xxx_port_pvid_set(ps
, port
, 0);
2191 mutex_unlock(&ps
->smi_mutex
);
2196 static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state
*ps
,
2197 const unsigned char *addr
)
2201 for (i
= 0; i
< 3; i
++) {
2202 ret
= _mv88e6xxx_reg_write(
2203 ps
, REG_GLOBAL
, GLOBAL_ATU_MAC_01
+ i
,
2204 (addr
[i
* 2] << 8) | addr
[i
* 2 + 1]);
2212 static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state
*ps
,
2213 unsigned char *addr
)
2217 for (i
= 0; i
< 3; i
++) {
2218 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
,
2219 GLOBAL_ATU_MAC_01
+ i
);
2222 addr
[i
* 2] = ret
>> 8;
2223 addr
[i
* 2 + 1] = ret
& 0xff;
2229 static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state
*ps
,
2230 struct mv88e6xxx_atu_entry
*entry
)
2234 ret
= _mv88e6xxx_atu_wait(ps
);
2238 ret
= _mv88e6xxx_atu_mac_write(ps
, entry
->mac
);
2242 ret
= _mv88e6xxx_atu_data_write(ps
, entry
);
2246 return _mv88e6xxx_atu_cmd(ps
, entry
->fid
, GLOBAL_ATU_OP_LOAD_DB
);
2249 static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state
*ps
, int port
,
2250 const unsigned char *addr
, u16 vid
,
2253 struct mv88e6xxx_atu_entry entry
= { 0 };
2254 struct mv88e6xxx_vtu_stu_entry vlan
;
2257 /* Null VLAN ID corresponds to the port private database */
2259 err
= _mv88e6xxx_port_fid_get(ps
, port
, &vlan
.fid
);
2261 err
= _mv88e6xxx_vtu_get(ps
, vid
, &vlan
, false);
2265 entry
.fid
= vlan
.fid
;
2266 entry
.state
= state
;
2267 ether_addr_copy(entry
.mac
, addr
);
2268 if (state
!= GLOBAL_ATU_DATA_STATE_UNUSED
) {
2269 entry
.trunk
= false;
2270 entry
.portv_trunkid
= BIT(port
);
2273 return _mv88e6xxx_atu_load(ps
, &entry
);
2276 static int mv88e6xxx_port_fdb_prepare(struct dsa_switch
*ds
, int port
,
2277 const struct switchdev_obj_port_fdb
*fdb
,
2278 struct switchdev_trans
*trans
)
2280 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2282 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_ATU
))
2285 /* We don't need any dynamic resource from the kernel (yet),
2286 * so skip the prepare phase.
2291 static void mv88e6xxx_port_fdb_add(struct dsa_switch
*ds
, int port
,
2292 const struct switchdev_obj_port_fdb
*fdb
,
2293 struct switchdev_trans
*trans
)
2295 int state
= is_multicast_ether_addr(fdb
->addr
) ?
2296 GLOBAL_ATU_DATA_STATE_MC_STATIC
:
2297 GLOBAL_ATU_DATA_STATE_UC_STATIC
;
2298 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2300 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_ATU
))
2303 mutex_lock(&ps
->smi_mutex
);
2304 if (_mv88e6xxx_port_fdb_load(ps
, port
, fdb
->addr
, fdb
->vid
, state
))
2305 netdev_err(ds
->ports
[port
], "failed to load MAC address\n");
2306 mutex_unlock(&ps
->smi_mutex
);
2309 static int mv88e6xxx_port_fdb_del(struct dsa_switch
*ds
, int port
,
2310 const struct switchdev_obj_port_fdb
*fdb
)
2312 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2315 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_ATU
))
2318 mutex_lock(&ps
->smi_mutex
);
2319 ret
= _mv88e6xxx_port_fdb_load(ps
, port
, fdb
->addr
, fdb
->vid
,
2320 GLOBAL_ATU_DATA_STATE_UNUSED
);
2321 mutex_unlock(&ps
->smi_mutex
);
2326 static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state
*ps
, u16 fid
,
2327 struct mv88e6xxx_atu_entry
*entry
)
2329 struct mv88e6xxx_atu_entry next
= { 0 };
2334 ret
= _mv88e6xxx_atu_wait(ps
);
2338 ret
= _mv88e6xxx_atu_cmd(ps
, fid
, GLOBAL_ATU_OP_GET_NEXT_DB
);
2342 ret
= _mv88e6xxx_atu_mac_read(ps
, next
.mac
);
2346 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_ATU_DATA
);
2350 next
.state
= ret
& GLOBAL_ATU_DATA_STATE_MASK
;
2351 if (next
.state
!= GLOBAL_ATU_DATA_STATE_UNUSED
) {
2352 unsigned int mask
, shift
;
2354 if (ret
& GLOBAL_ATU_DATA_TRUNK
) {
2356 mask
= GLOBAL_ATU_DATA_TRUNK_ID_MASK
;
2357 shift
= GLOBAL_ATU_DATA_TRUNK_ID_SHIFT
;
2360 mask
= GLOBAL_ATU_DATA_PORT_VECTOR_MASK
;
2361 shift
= GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT
;
2364 next
.portv_trunkid
= (ret
& mask
) >> shift
;
2371 static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state
*ps
,
2372 u16 fid
, u16 vid
, int port
,
2373 struct switchdev_obj_port_fdb
*fdb
,
2374 int (*cb
)(struct switchdev_obj
*obj
))
2376 struct mv88e6xxx_atu_entry addr
= {
2377 .mac
= { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
2381 err
= _mv88e6xxx_atu_mac_write(ps
, addr
.mac
);
2386 err
= _mv88e6xxx_atu_getnext(ps
, fid
, &addr
);
2390 if (addr
.state
== GLOBAL_ATU_DATA_STATE_UNUSED
)
2393 if (!addr
.trunk
&& addr
.portv_trunkid
& BIT(port
)) {
2394 bool is_static
= addr
.state
==
2395 (is_multicast_ether_addr(addr
.mac
) ?
2396 GLOBAL_ATU_DATA_STATE_MC_STATIC
:
2397 GLOBAL_ATU_DATA_STATE_UC_STATIC
);
2400 ether_addr_copy(fdb
->addr
, addr
.mac
);
2401 fdb
->ndm_state
= is_static
? NUD_NOARP
: NUD_REACHABLE
;
2403 err
= cb(&fdb
->obj
);
2407 } while (!is_broadcast_ether_addr(addr
.mac
));
2412 static int mv88e6xxx_port_fdb_dump(struct dsa_switch
*ds
, int port
,
2413 struct switchdev_obj_port_fdb
*fdb
,
2414 int (*cb
)(struct switchdev_obj
*obj
))
2416 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2417 struct mv88e6xxx_vtu_stu_entry vlan
= {
2418 .vid
= GLOBAL_VTU_VID_MASK
, /* all ones */
2423 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_ATU
))
2426 mutex_lock(&ps
->smi_mutex
);
2428 /* Dump port's default Filtering Information Database (VLAN ID 0) */
2429 err
= _mv88e6xxx_port_fid_get(ps
, port
, &fid
);
2433 err
= _mv88e6xxx_port_fdb_dump_one(ps
, fid
, 0, port
, fdb
, cb
);
2437 /* Dump VLANs' Filtering Information Databases */
2438 err
= _mv88e6xxx_vtu_vid_write(ps
, vlan
.vid
);
2443 err
= _mv88e6xxx_vtu_getnext(ps
, &vlan
);
2450 err
= _mv88e6xxx_port_fdb_dump_one(ps
, vlan
.fid
, vlan
.vid
, port
,
2454 } while (vlan
.vid
< GLOBAL_VTU_VID_MASK
);
2457 mutex_unlock(&ps
->smi_mutex
);
2462 static int mv88e6xxx_port_bridge_join(struct dsa_switch
*ds
, int port
,
2463 struct net_device
*bridge
)
2465 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2468 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_VLANTABLE
))
2471 mutex_lock(&ps
->smi_mutex
);
2473 /* Assign the bridge and remap each port's VLANTable */
2474 ps
->ports
[port
].bridge_dev
= bridge
;
2476 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
2477 if (ps
->ports
[i
].bridge_dev
== bridge
) {
2478 err
= _mv88e6xxx_port_based_vlan_map(ps
, i
);
2484 mutex_unlock(&ps
->smi_mutex
);
2489 static void mv88e6xxx_port_bridge_leave(struct dsa_switch
*ds
, int port
)
2491 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2492 struct net_device
*bridge
= ps
->ports
[port
].bridge_dev
;
2495 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_VLANTABLE
))
2498 mutex_lock(&ps
->smi_mutex
);
2500 /* Unassign the bridge and remap each port's VLANTable */
2501 ps
->ports
[port
].bridge_dev
= NULL
;
2503 for (i
= 0; i
< ps
->info
->num_ports
; ++i
)
2504 if (i
== port
|| ps
->ports
[i
].bridge_dev
== bridge
)
2505 if (_mv88e6xxx_port_based_vlan_map(ps
, i
))
2506 netdev_warn(ds
->ports
[i
], "failed to remap\n");
2508 mutex_unlock(&ps
->smi_mutex
);
2511 static void mv88e6xxx_bridge_work(struct work_struct
*work
)
2513 struct mv88e6xxx_priv_state
*ps
;
2514 struct dsa_switch
*ds
;
2517 ps
= container_of(work
, struct mv88e6xxx_priv_state
, bridge_work
);
2520 mutex_lock(&ps
->smi_mutex
);
2522 for (port
= 0; port
< ps
->info
->num_ports
; ++port
)
2523 if (test_and_clear_bit(port
, ps
->port_state_update_mask
) &&
2524 _mv88e6xxx_port_state(ps
, port
, ps
->ports
[port
].state
))
2525 netdev_warn(ds
->ports
[port
],
2526 "failed to update state to %s\n",
2527 mv88e6xxx_port_state_names
[ps
->ports
[port
].state
]);
2529 mutex_unlock(&ps
->smi_mutex
);
2532 static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state
*ps
,
2533 int port
, int page
, int reg
, int val
)
2537 ret
= _mv88e6xxx_phy_write_indirect(ps
, port
, 0x16, page
);
2539 goto restore_page_0
;
2541 ret
= _mv88e6xxx_phy_write_indirect(ps
, port
, reg
, val
);
2543 _mv88e6xxx_phy_write_indirect(ps
, port
, 0x16, 0x0);
2548 static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state
*ps
,
2549 int port
, int page
, int reg
)
2553 ret
= _mv88e6xxx_phy_write_indirect(ps
, port
, 0x16, page
);
2555 goto restore_page_0
;
2557 ret
= _mv88e6xxx_phy_read_indirect(ps
, port
, reg
);
2559 _mv88e6xxx_phy_write_indirect(ps
, port
, 0x16, 0x0);
2564 static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state
*ps
)
2566 bool ppu_active
= mv88e6xxx_has(ps
, MV88E6XXX_FLAG_PPU_ACTIVE
);
2567 u16 is_reset
= (ppu_active
? 0x8800 : 0xc800);
2568 struct gpio_desc
*gpiod
= ps
->ds
->pd
->reset
;
2569 unsigned long timeout
;
2573 /* Set all ports to the disabled state. */
2574 for (i
= 0; i
< ps
->info
->num_ports
; i
++) {
2575 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(i
), PORT_CONTROL
);
2579 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(i
), PORT_CONTROL
,
2585 /* Wait for transmit queues to drain. */
2586 usleep_range(2000, 4000);
2588 /* If there is a gpio connected to the reset pin, toggle it */
2590 gpiod_set_value_cansleep(gpiod
, 1);
2591 usleep_range(10000, 20000);
2592 gpiod_set_value_cansleep(gpiod
, 0);
2593 usleep_range(10000, 20000);
2596 /* Reset the switch. Keep the PPU active if requested. The PPU
2597 * needs to be active to support indirect phy register access
2598 * through global registers 0x18 and 0x19.
2601 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, 0x04, 0xc000);
2603 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, 0x04, 0xc400);
2607 /* Wait up to one second for reset to complete. */
2608 timeout
= jiffies
+ 1 * HZ
;
2609 while (time_before(jiffies
, timeout
)) {
2610 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, 0x00);
2614 if ((ret
& is_reset
) == is_reset
)
2616 usleep_range(1000, 2000);
2618 if (time_after(jiffies
, timeout
))
2626 static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state
*ps
)
2630 ret
= _mv88e6xxx_phy_page_read(ps
, REG_FIBER_SERDES
, PAGE_FIBER_SERDES
,
2635 if (ret
& BMCR_PDOWN
) {
2637 ret
= _mv88e6xxx_phy_page_write(ps
, REG_FIBER_SERDES
,
2638 PAGE_FIBER_SERDES
, MII_BMCR
,
2645 static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state
*ps
, int port
)
2647 struct dsa_switch
*ds
= ps
->ds
;
2651 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2652 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2653 mv88e6xxx_6185_family(ps
) || mv88e6xxx_6095_family(ps
) ||
2654 mv88e6xxx_6065_family(ps
) || mv88e6xxx_6320_family(ps
)) {
2655 /* MAC Forcing register: don't force link, speed,
2656 * duplex or flow control state to any particular
2657 * values on physical ports, but force the CPU port
2658 * and all DSA ports to their maximum bandwidth and
2661 reg
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_PCS_CTRL
);
2662 if (dsa_is_cpu_port(ds
, port
) || dsa_is_dsa_port(ds
, port
)) {
2663 reg
&= ~PORT_PCS_CTRL_UNFORCED
;
2664 reg
|= PORT_PCS_CTRL_FORCE_LINK
|
2665 PORT_PCS_CTRL_LINK_UP
|
2666 PORT_PCS_CTRL_DUPLEX_FULL
|
2667 PORT_PCS_CTRL_FORCE_DUPLEX
;
2668 if (mv88e6xxx_6065_family(ps
))
2669 reg
|= PORT_PCS_CTRL_100
;
2671 reg
|= PORT_PCS_CTRL_1000
;
2673 reg
|= PORT_PCS_CTRL_UNFORCED
;
2676 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2677 PORT_PCS_CTRL
, reg
);
2682 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2683 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2684 * tunneling, determine priority by looking at 802.1p and IP
2685 * priority fields (IP prio has precedence), and set STP state
2688 * If this is the CPU link, use DSA or EDSA tagging depending
2689 * on which tagging mode was configured.
2691 * If this is a link to another switch, use DSA tagging mode.
2693 * If this is the upstream port for this switch, enable
2694 * forwarding of unknown unicasts and multicasts.
2697 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2698 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2699 mv88e6xxx_6095_family(ps
) || mv88e6xxx_6065_family(ps
) ||
2700 mv88e6xxx_6185_family(ps
) || mv88e6xxx_6320_family(ps
))
2701 reg
= PORT_CONTROL_IGMP_MLD_SNOOP
|
2702 PORT_CONTROL_USE_TAG
| PORT_CONTROL_USE_IP
|
2703 PORT_CONTROL_STATE_FORWARDING
;
2704 if (dsa_is_cpu_port(ds
, port
)) {
2705 if (mv88e6xxx_6095_family(ps
) || mv88e6xxx_6185_family(ps
))
2706 reg
|= PORT_CONTROL_DSA_TAG
;
2707 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2708 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2709 mv88e6xxx_6320_family(ps
)) {
2710 if (ds
->dst
->tag_protocol
== DSA_TAG_PROTO_EDSA
)
2711 reg
|= PORT_CONTROL_FRAME_ETHER_TYPE_DSA
;
2713 reg
|= PORT_CONTROL_FRAME_MODE_DSA
;
2714 reg
|= PORT_CONTROL_FORWARD_UNKNOWN
|
2715 PORT_CONTROL_FORWARD_UNKNOWN_MC
;
2718 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2719 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2720 mv88e6xxx_6095_family(ps
) || mv88e6xxx_6065_family(ps
) ||
2721 mv88e6xxx_6185_family(ps
) || mv88e6xxx_6320_family(ps
)) {
2722 if (ds
->dst
->tag_protocol
== DSA_TAG_PROTO_EDSA
)
2723 reg
|= PORT_CONTROL_EGRESS_ADD_TAG
;
2726 if (dsa_is_dsa_port(ds
, port
)) {
2727 if (mv88e6xxx_6095_family(ps
) || mv88e6xxx_6185_family(ps
))
2728 reg
|= PORT_CONTROL_DSA_TAG
;
2729 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2730 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2731 mv88e6xxx_6320_family(ps
)) {
2732 reg
|= PORT_CONTROL_FRAME_MODE_DSA
;
2735 if (port
== dsa_upstream_port(ds
))
2736 reg
|= PORT_CONTROL_FORWARD_UNKNOWN
|
2737 PORT_CONTROL_FORWARD_UNKNOWN_MC
;
2740 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2746 /* If this port is connected to a SerDes, make sure the SerDes is not
2749 if (mv88e6xxx_6352_family(ps
)) {
2750 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_STATUS
);
2753 ret
&= PORT_STATUS_CMODE_MASK
;
2754 if ((ret
== PORT_STATUS_CMODE_100BASE_X
) ||
2755 (ret
== PORT_STATUS_CMODE_1000BASE_X
) ||
2756 (ret
== PORT_STATUS_CMODE_SGMII
)) {
2757 ret
= mv88e6xxx_power_on_serdes(ps
);
2763 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2764 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
2765 * untagged frames on this port, do a destination address lookup on all
2766 * received packets as usual, disable ARP mirroring and don't send a
2767 * copy of all transmitted/received frames on this port to the CPU.
2770 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2771 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2772 mv88e6xxx_6095_family(ps
) || mv88e6xxx_6320_family(ps
) ||
2773 mv88e6xxx_6185_family(ps
))
2774 reg
= PORT_CONTROL_2_MAP_DA
;
2776 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2777 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6320_family(ps
))
2778 reg
|= PORT_CONTROL_2_JUMBO_10240
;
2780 if (mv88e6xxx_6095_family(ps
) || mv88e6xxx_6185_family(ps
)) {
2781 /* Set the upstream port this port should use */
2782 reg
|= dsa_upstream_port(ds
);
2783 /* enable forwarding of unknown multicast addresses to
2786 if (port
== dsa_upstream_port(ds
))
2787 reg
|= PORT_CONTROL_2_FORWARD_UNKNOWN
;
2790 reg
|= PORT_CONTROL_2_8021Q_DISABLED
;
2793 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2794 PORT_CONTROL_2
, reg
);
2799 /* Port Association Vector: when learning source addresses
2800 * of packets, add the address to the address database using
2801 * a port bitmap that has only the bit for this port set and
2802 * the other bits clear.
2805 /* Disable learning for CPU port */
2806 if (dsa_is_cpu_port(ds
, port
))
2809 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_ASSOC_VECTOR
, reg
);
2813 /* Egress rate control 2: disable egress rate control. */
2814 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_RATE_CONTROL_2
,
2819 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2820 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2821 mv88e6xxx_6320_family(ps
)) {
2822 /* Do not limit the period of time that this port can
2823 * be paused for by the remote end or the period of
2824 * time that this port can pause the remote end.
2826 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2827 PORT_PAUSE_CTRL
, 0x0000);
2831 /* Port ATU control: disable limiting the number of
2832 * address database entries that this port is allowed
2835 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2836 PORT_ATU_CONTROL
, 0x0000);
2837 /* Priority Override: disable DA, SA and VTU priority
2840 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2841 PORT_PRI_OVERRIDE
, 0x0000);
2845 /* Port Ethertype: use the Ethertype DSA Ethertype
2848 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2849 PORT_ETH_TYPE
, ETH_P_EDSA
);
2852 /* Tag Remap: use an identity 802.1p prio -> switch
2855 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2856 PORT_TAG_REGMAP_0123
, 0x3210);
2860 /* Tag Remap 2: use an identity 802.1p prio -> switch
2863 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2864 PORT_TAG_REGMAP_4567
, 0x7654);
2869 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2870 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2871 mv88e6xxx_6185_family(ps
) || mv88e6xxx_6095_family(ps
) ||
2872 mv88e6xxx_6320_family(ps
)) {
2873 /* Rate Control: disable ingress rate limiting. */
2874 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2875 PORT_RATE_CONTROL
, 0x0001);
2880 /* Port Control 1: disable trunking, disable sending
2881 * learning messages to this port.
2883 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_CONTROL_1
, 0x0000);
2887 /* Port based VLAN map: give each port the same default address
2888 * database, and allow bidirectional communication between the
2889 * CPU and DSA port(s), and the other ports.
2891 ret
= _mv88e6xxx_port_fid_set(ps
, port
, 0);
2895 ret
= _mv88e6xxx_port_based_vlan_map(ps
, port
);
2899 /* Default VLAN ID and priority: don't set a default VLAN
2900 * ID, and set the default packet priority to zero.
2902 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_DEFAULT_VLAN
,
2910 static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state
*ps
)
2912 struct dsa_switch
*ds
= ps
->ds
;
2913 u32 upstream_port
= dsa_upstream_port(ds
);
2918 /* Enable the PHY Polling Unit if present, don't discard any packets,
2919 * and mask all interrupt sources.
2922 if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_PPU
) ||
2923 mv88e6xxx_has(ps
, MV88E6XXX_FLAG_PPU_ACTIVE
))
2924 reg
|= GLOBAL_CONTROL_PPU_ENABLE
;
2926 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_CONTROL
, reg
);
2930 /* Configure the upstream port, and configure it as the port to which
2931 * ingress and egress and ARP monitor frames are to be sent.
2933 reg
= upstream_port
<< GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT
|
2934 upstream_port
<< GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT
|
2935 upstream_port
<< GLOBAL_MONITOR_CONTROL_ARP_SHIFT
;
2936 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_MONITOR_CONTROL
, reg
);
2940 /* Disable remote management, and set the switch's DSA device number. */
2941 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_CONTROL_2
,
2942 GLOBAL_CONTROL_2_MULTIPLE_CASCADE
|
2943 (ds
->index
& 0x1f));
2947 /* Set the default address aging time to 5 minutes, and
2948 * enable address learn messages to be sent to all message
2951 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_ATU_CONTROL
,
2952 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL
);
2956 /* Configure the IP ToS mapping registers. */
2957 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_0
, 0x0000);
2960 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_1
, 0x0000);
2963 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_2
, 0x5555);
2966 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_3
, 0x5555);
2969 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_4
, 0xaaaa);
2972 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_5
, 0xaaaa);
2975 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_6
, 0xffff);
2978 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_7
, 0xffff);
2982 /* Configure the IEEE 802.1p priority mapping register. */
2983 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IEEE_PRI
, 0xfa41);
2987 /* Send all frames with destination addresses matching
2988 * 01:80:c2:00:00:0x to the CPU port.
2990 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_MGMT_EN_0X
, 0xffff);
2994 /* Ignore removed tag data on doubly tagged packets, disable
2995 * flow control messages, force flow control priority to the
2996 * highest, and send all special multicast frames to the CPU
2997 * port at the highest priority.
2999 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_SWITCH_MGMT
,
3000 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU
| 0x70 |
3001 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI
);
3005 /* Program the DSA routing table. */
3006 for (i
= 0; i
< 32; i
++) {
3009 if (ps
->ds
->pd
->rtable
&&
3010 i
!= ps
->ds
->index
&& i
< ps
->ds
->dst
->pd
->nr_chips
)
3011 nexthop
= ps
->ds
->pd
->rtable
[i
] & 0x1f;
3013 err
= _mv88e6xxx_reg_write(
3015 GLOBAL2_DEVICE_MAPPING
,
3016 GLOBAL2_DEVICE_MAPPING_UPDATE
|
3017 (i
<< GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT
) | nexthop
);
3022 /* Clear all trunk masks. */
3023 for (i
= 0; i
< 8; i
++) {
3024 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_TRUNK_MASK
,
3026 (i
<< GLOBAL2_TRUNK_MASK_NUM_SHIFT
) |
3027 ((1 << ps
->info
->num_ports
) - 1));
3032 /* Clear all trunk mappings. */
3033 for (i
= 0; i
< 16; i
++) {
3034 err
= _mv88e6xxx_reg_write(
3036 GLOBAL2_TRUNK_MAPPING
,
3037 GLOBAL2_TRUNK_MAPPING_UPDATE
|
3038 (i
<< GLOBAL2_TRUNK_MAPPING_ID_SHIFT
));
3043 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
3044 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
3045 mv88e6xxx_6320_family(ps
)) {
3046 /* Send all frames with destination addresses matching
3047 * 01:80:c2:00:00:2x to the CPU port.
3049 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
,
3050 GLOBAL2_MGMT_EN_2X
, 0xffff);
3054 /* Initialise cross-chip port VLAN table to reset
3057 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
,
3058 GLOBAL2_PVT_ADDR
, 0x9000);
3062 /* Clear the priority override table. */
3063 for (i
= 0; i
< 16; i
++) {
3064 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
,
3065 GLOBAL2_PRIO_OVERRIDE
,
3072 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
3073 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
3074 mv88e6xxx_6185_family(ps
) || mv88e6xxx_6095_family(ps
) ||
3075 mv88e6xxx_6320_family(ps
)) {
3076 /* Disable ingress rate limiting by resetting all
3077 * ingress rate limit registers to their initial
3080 for (i
= 0; i
< ps
->info
->num_ports
; i
++) {
3081 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
,
3089 /* Clear the statistics counters for all ports */
3090 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_STATS_OP
,
3091 GLOBAL_STATS_OP_FLUSH_ALL
);
3095 /* Wait for the flush to complete. */
3096 err
= _mv88e6xxx_stats_wait(ps
);
3100 /* Clear all ATU entries */
3101 err
= _mv88e6xxx_atu_flush(ps
, 0, true);
3105 /* Clear all the VTU and STU entries */
3106 err
= _mv88e6xxx_vtu_stu_flush(ps
);
3113 static int mv88e6xxx_setup(struct dsa_switch
*ds
)
3115 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3121 mutex_init(&ps
->smi_mutex
);
3123 INIT_WORK(&ps
->bridge_work
, mv88e6xxx_bridge_work
);
3125 if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_EEPROM
))
3126 mutex_init(&ps
->eeprom_mutex
);
3128 if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_PPU
))
3129 mv88e6xxx_ppu_state_init(ps
);
3131 mutex_lock(&ps
->smi_mutex
);
3133 err
= mv88e6xxx_switch_reset(ps
);
3137 err
= mv88e6xxx_setup_global(ps
);
3141 for (i
= 0; i
< ps
->info
->num_ports
; i
++) {
3142 err
= mv88e6xxx_setup_port(ps
, i
);
3148 mutex_unlock(&ps
->smi_mutex
);
3153 int mv88e6xxx_phy_page_read(struct dsa_switch
*ds
, int port
, int page
, int reg
)
3155 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3158 mutex_lock(&ps
->smi_mutex
);
3159 ret
= _mv88e6xxx_phy_page_read(ps
, port
, page
, reg
);
3160 mutex_unlock(&ps
->smi_mutex
);
3165 int mv88e6xxx_phy_page_write(struct dsa_switch
*ds
, int port
, int page
,
3168 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3171 mutex_lock(&ps
->smi_mutex
);
3172 ret
= _mv88e6xxx_phy_page_write(ps
, port
, page
, reg
, val
);
3173 mutex_unlock(&ps
->smi_mutex
);
3178 static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state
*ps
,
3181 if (port
>= 0 && port
< ps
->info
->num_ports
)
3186 static int mv88e6xxx_phy_read(struct dsa_switch
*ds
, int port
, int regnum
)
3188 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3189 int addr
= mv88e6xxx_port_to_phy_addr(ps
, port
);
3195 mutex_lock(&ps
->smi_mutex
);
3197 if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_PPU
))
3198 ret
= mv88e6xxx_phy_read_ppu(ps
, addr
, regnum
);
3199 else if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_SMI_PHY
))
3200 ret
= _mv88e6xxx_phy_read_indirect(ps
, addr
, regnum
);
3202 ret
= _mv88e6xxx_phy_read(ps
, addr
, regnum
);
3204 mutex_unlock(&ps
->smi_mutex
);
3208 static int mv88e6xxx_phy_write(struct dsa_switch
*ds
, int port
, int regnum
,
3211 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3212 int addr
= mv88e6xxx_port_to_phy_addr(ps
, port
);
3218 mutex_lock(&ps
->smi_mutex
);
3220 if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_PPU
))
3221 ret
= mv88e6xxx_phy_write_ppu(ps
, addr
, regnum
, val
);
3222 else if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_SMI_PHY
))
3223 ret
= _mv88e6xxx_phy_write_indirect(ps
, addr
, regnum
, val
);
3225 ret
= _mv88e6xxx_phy_write(ps
, addr
, regnum
, val
);
3227 mutex_unlock(&ps
->smi_mutex
);
3231 #ifdef CONFIG_NET_DSA_HWMON
3233 static int mv88e61xx_get_temp(struct dsa_switch
*ds
, int *temp
)
3235 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3241 mutex_lock(&ps
->smi_mutex
);
3243 ret
= _mv88e6xxx_phy_write(ps
, 0x0, 0x16, 0x6);
3247 /* Enable temperature sensor */
3248 ret
= _mv88e6xxx_phy_read(ps
, 0x0, 0x1a);
3252 ret
= _mv88e6xxx_phy_write(ps
, 0x0, 0x1a, ret
| (1 << 5));
3256 /* Wait for temperature to stabilize */
3257 usleep_range(10000, 12000);
3259 val
= _mv88e6xxx_phy_read(ps
, 0x0, 0x1a);
3265 /* Disable temperature sensor */
3266 ret
= _mv88e6xxx_phy_write(ps
, 0x0, 0x1a, ret
& ~(1 << 5));
3270 *temp
= ((val
& 0x1f) - 5) * 5;
3273 _mv88e6xxx_phy_write(ps
, 0x0, 0x16, 0x0);
3274 mutex_unlock(&ps
->smi_mutex
);
/* Read the die temperature on 88E63xx-style chips from PHY page 6,
 * register 27 (PHY 3 on the 6320 family, PHY 0 otherwise).  The raw
 * value has a -25C offset.  Returns 0 or a negative errno.
 */
static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
	int ret;

	*temp = 0;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
	if (ret < 0)
		return ret;

	*temp = (ret & 0xff) - 25;

	return 0;
}
3295 static int mv88e6xxx_get_temp(struct dsa_switch
*ds
, int *temp
)
3297 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3299 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_TEMP
))
3302 if (mv88e6xxx_6320_family(ps
) || mv88e6xxx_6352_family(ps
))
3303 return mv88e63xx_get_temp(ds
, temp
);
3305 return mv88e61xx_get_temp(ds
, temp
);
3308 static int mv88e6xxx_get_temp_limit(struct dsa_switch
*ds
, int *temp
)
3310 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3311 int phy
= mv88e6xxx_6320_family(ps
) ? 3 : 0;
3314 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_TEMP_LIMIT
))
3319 ret
= mv88e6xxx_phy_page_read(ds
, phy
, 6, 26);
3323 *temp
= (((ret
>> 8) & 0x1f) * 5) - 25;
3328 static int mv88e6xxx_set_temp_limit(struct dsa_switch
*ds
, int temp
)
3330 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3331 int phy
= mv88e6xxx_6320_family(ps
) ? 3 : 0;
3334 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_TEMP_LIMIT
))
3337 ret
= mv88e6xxx_phy_page_read(ds
, phy
, 6, 26);
3340 temp
= clamp_val(DIV_ROUND_CLOSEST(temp
, 5) + 5, 0, 0x1f);
3341 return mv88e6xxx_phy_page_write(ds
, phy
, 6, 26,
3342 (ret
& 0xe0ff) | (temp
<< 8));
3345 static int mv88e6xxx_get_temp_alarm(struct dsa_switch
*ds
, bool *alarm
)
3347 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3348 int phy
= mv88e6xxx_6320_family(ps
) ? 3 : 0;
3351 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_TEMP_LIMIT
))
3356 ret
= mv88e6xxx_phy_page_read(ds
, phy
, 6, 26);
3360 *alarm
= !!(ret
& 0x40);
3364 #endif /* CONFIG_NET_DSA_HWMON */
3366 static const struct mv88e6xxx_info mv88e6xxx_table
[] = {
3368 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6085
,
3369 .family
= MV88E6XXX_FAMILY_6097
,
3370 .name
= "Marvell 88E6085",
3371 .num_databases
= 4096,
3373 .flags
= MV88E6XXX_FLAGS_FAMILY_6097
,
3377 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6095
,
3378 .family
= MV88E6XXX_FAMILY_6095
,
3379 .name
= "Marvell 88E6095/88E6095F",
3380 .num_databases
= 256,
3382 .flags
= MV88E6XXX_FLAGS_FAMILY_6095
,
3386 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6123
,
3387 .family
= MV88E6XXX_FAMILY_6165
,
3388 .name
= "Marvell 88E6123",
3389 .num_databases
= 4096,
3391 .flags
= MV88E6XXX_FLAGS_FAMILY_6165
,
3395 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6131
,
3396 .family
= MV88E6XXX_FAMILY_6185
,
3397 .name
= "Marvell 88E6131",
3398 .num_databases
= 256,
3400 .flags
= MV88E6XXX_FLAGS_FAMILY_6185
,
3404 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6161
,
3405 .family
= MV88E6XXX_FAMILY_6165
,
3406 .name
= "Marvell 88E6161",
3407 .num_databases
= 4096,
3409 .flags
= MV88E6XXX_FLAGS_FAMILY_6165
,
3413 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6165
,
3414 .family
= MV88E6XXX_FAMILY_6165
,
3415 .name
= "Marvell 88E6165",
3416 .num_databases
= 4096,
3418 .flags
= MV88E6XXX_FLAGS_FAMILY_6165
,
3422 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6171
,
3423 .family
= MV88E6XXX_FAMILY_6351
,
3424 .name
= "Marvell 88E6171",
3425 .num_databases
= 4096,
3427 .flags
= MV88E6XXX_FLAGS_FAMILY_6351
,
3431 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6172
,
3432 .family
= MV88E6XXX_FAMILY_6352
,
3433 .name
= "Marvell 88E6172",
3434 .num_databases
= 4096,
3436 .flags
= MV88E6XXX_FLAGS_FAMILY_6352
,
3440 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6175
,
3441 .family
= MV88E6XXX_FAMILY_6351
,
3442 .name
= "Marvell 88E6175",
3443 .num_databases
= 4096,
3445 .flags
= MV88E6XXX_FLAGS_FAMILY_6351
,
3449 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6176
,
3450 .family
= MV88E6XXX_FAMILY_6352
,
3451 .name
= "Marvell 88E6176",
3452 .num_databases
= 4096,
3454 .flags
= MV88E6XXX_FLAGS_FAMILY_6352
,
3458 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6185
,
3459 .family
= MV88E6XXX_FAMILY_6185
,
3460 .name
= "Marvell 88E6185",
3461 .num_databases
= 256,
3463 .flags
= MV88E6XXX_FLAGS_FAMILY_6185
,
3467 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6240
,
3468 .family
= MV88E6XXX_FAMILY_6352
,
3469 .name
= "Marvell 88E6240",
3470 .num_databases
= 4096,
3472 .flags
= MV88E6XXX_FLAGS_FAMILY_6352
,
3476 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6320
,
3477 .family
= MV88E6XXX_FAMILY_6320
,
3478 .name
= "Marvell 88E6320",
3479 .num_databases
= 4096,
3481 .flags
= MV88E6XXX_FLAGS_FAMILY_6320
,
3485 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6321
,
3486 .family
= MV88E6XXX_FAMILY_6320
,
3487 .name
= "Marvell 88E6321",
3488 .num_databases
= 4096,
3490 .flags
= MV88E6XXX_FLAGS_FAMILY_6320
,
3494 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6350
,
3495 .family
= MV88E6XXX_FAMILY_6351
,
3496 .name
= "Marvell 88E6350",
3497 .num_databases
= 4096,
3499 .flags
= MV88E6XXX_FLAGS_FAMILY_6351
,
3503 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6351
,
3504 .family
= MV88E6XXX_FAMILY_6351
,
3505 .name
= "Marvell 88E6351",
3506 .num_databases
= 4096,
3508 .flags
= MV88E6XXX_FLAGS_FAMILY_6351
,
3512 .prod_num
= PORT_SWITCH_ID_PROD_NUM_6352
,
3513 .family
= MV88E6XXX_FAMILY_6352
,
3514 .name
= "Marvell 88E6352",
3515 .num_databases
= 4096,
3517 .flags
= MV88E6XXX_FLAGS_FAMILY_6352
,
3521 static const struct mv88e6xxx_info
*
3522 mv88e6xxx_lookup_info(unsigned int prod_num
, const struct mv88e6xxx_info
*table
,
3527 for (i
= 0; i
< num
; ++i
)
3528 if (table
[i
].prod_num
== prod_num
)
3534 static const char *mv88e6xxx_probe(struct device
*dsa_dev
,
3535 struct device
*host_dev
, int sw_addr
,
3538 const struct mv88e6xxx_info
*info
;
3539 struct mv88e6xxx_priv_state
*ps
;
3540 struct mii_bus
*bus
;
3542 int id
, prod_num
, rev
;
3544 bus
= dsa_host_dev_to_mii_bus(host_dev
);
3548 id
= __mv88e6xxx_reg_read(bus
, sw_addr
, REG_PORT(0), PORT_SWITCH_ID
);
3552 prod_num
= (id
& 0xfff0) >> 4;
3555 info
= mv88e6xxx_lookup_info(prod_num
, mv88e6xxx_table
,
3556 ARRAY_SIZE(mv88e6xxx_table
));
3562 ps
= devm_kzalloc(dsa_dev
, sizeof(*ps
), GFP_KERNEL
);
3567 ps
->sw_addr
= sw_addr
;
3572 dev_info(&ps
->bus
->dev
, "switch 0x%x probed: %s, revision %u\n",
3573 prod_num
, name
, rev
);
3578 struct dsa_switch_driver mv88e6xxx_switch_driver
= {
3579 .tag_protocol
= DSA_TAG_PROTO_EDSA
,
3580 .probe
= mv88e6xxx_probe
,
3581 .setup
= mv88e6xxx_setup
,
3582 .set_addr
= mv88e6xxx_set_addr
,
3583 .phy_read
= mv88e6xxx_phy_read
,
3584 .phy_write
= mv88e6xxx_phy_write
,
3585 .adjust_link
= mv88e6xxx_adjust_link
,
3586 .get_strings
= mv88e6xxx_get_strings
,
3587 .get_ethtool_stats
= mv88e6xxx_get_ethtool_stats
,
3588 .get_sset_count
= mv88e6xxx_get_sset_count
,
3589 .set_eee
= mv88e6xxx_set_eee
,
3590 .get_eee
= mv88e6xxx_get_eee
,
3591 #ifdef CONFIG_NET_DSA_HWMON
3592 .get_temp
= mv88e6xxx_get_temp
,
3593 .get_temp_limit
= mv88e6xxx_get_temp_limit
,
3594 .set_temp_limit
= mv88e6xxx_set_temp_limit
,
3595 .get_temp_alarm
= mv88e6xxx_get_temp_alarm
,
3597 .get_eeprom
= mv88e6xxx_get_eeprom
,
3598 .set_eeprom
= mv88e6xxx_set_eeprom
,
3599 .get_regs_len
= mv88e6xxx_get_regs_len
,
3600 .get_regs
= mv88e6xxx_get_regs
,
3601 .port_bridge_join
= mv88e6xxx_port_bridge_join
,
3602 .port_bridge_leave
= mv88e6xxx_port_bridge_leave
,
3603 .port_stp_state_set
= mv88e6xxx_port_stp_state_set
,
3604 .port_vlan_filtering
= mv88e6xxx_port_vlan_filtering
,
3605 .port_vlan_prepare
= mv88e6xxx_port_vlan_prepare
,
3606 .port_vlan_add
= mv88e6xxx_port_vlan_add
,
3607 .port_vlan_del
= mv88e6xxx_port_vlan_del
,
3608 .port_vlan_dump
= mv88e6xxx_port_vlan_dump
,
3609 .port_fdb_prepare
= mv88e6xxx_port_fdb_prepare
,
3610 .port_fdb_add
= mv88e6xxx_port_fdb_add
,
3611 .port_fdb_del
= mv88e6xxx_port_fdb_del
,
3612 .port_fdb_dump
= mv88e6xxx_port_fdb_dump
,
3615 static int __init
mv88e6xxx_init(void)
3617 register_switch_driver(&mv88e6xxx_switch_driver
);
3621 module_init(mv88e6xxx_init
);
3623 static void __exit
mv88e6xxx_cleanup(void)
3625 unregister_switch_driver(&mv88e6xxx_switch_driver
);
3627 module_exit(mv88e6xxx_cleanup
);
3629 MODULE_ALIAS("platform:mv88e6085");
3630 MODULE_ALIAS("platform:mv88e6095");
3631 MODULE_ALIAS("platform:mv88e6095f");
3632 MODULE_ALIAS("platform:mv88e6123");
3633 MODULE_ALIAS("platform:mv88e6131");
3634 MODULE_ALIAS("platform:mv88e6161");
3635 MODULE_ALIAS("platform:mv88e6165");
3636 MODULE_ALIAS("platform:mv88e6171");
3637 MODULE_ALIAS("platform:mv88e6172");
3638 MODULE_ALIAS("platform:mv88e6175");
3639 MODULE_ALIAS("platform:mv88e6176");
3640 MODULE_ALIAS("platform:mv88e6320");
3641 MODULE_ALIAS("platform:mv88e6321");
3642 MODULE_ALIAS("platform:mv88e6350");
3643 MODULE_ALIAS("platform:mv88e6351");
3644 MODULE_ALIAS("platform:mv88e6352");
3645 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
3646 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
3647 MODULE_LICENSE("GPL");