/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * Copyright (c) 2015 CMC Electronics, Inc.
 *	Added support for VLAN Table Unit operations
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/gpio/consumer.h>
#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <net/switchdev.h>
#include "mv88e6xxx.h"
28 static void assert_smi_lock(struct mv88e6xxx_priv_state
*ps
)
30 if (unlikely(!mutex_is_locked(&ps
->smi_mutex
))) {
31 dev_err(ps
->dev
, "SMI lock not held!\n");
36 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
37 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
38 * will be directly accessible on some {device address,register address}
39 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
40 * will only respond to SMI transactions to that specific address, and
41 * an indirect addressing mechanism needs to be used to access its
44 static int mv88e6xxx_reg_wait_ready(struct mii_bus
*bus
, int sw_addr
)
49 for (i
= 0; i
< 16; i
++) {
50 ret
= mdiobus_read_nested(bus
, sw_addr
, SMI_CMD
);
54 if ((ret
& SMI_CMD_BUSY
) == 0)
61 static int __mv88e6xxx_reg_read(struct mii_bus
*bus
, int sw_addr
, int addr
,
67 return mdiobus_read_nested(bus
, addr
, reg
);
69 /* Wait for the bus to become free. */
70 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
74 /* Transmit the read command. */
75 ret
= mdiobus_write_nested(bus
, sw_addr
, SMI_CMD
,
76 SMI_CMD_OP_22_READ
| (addr
<< 5) | reg
);
80 /* Wait for the read command to complete. */
81 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
86 ret
= mdiobus_read_nested(bus
, sw_addr
, SMI_DATA
);
93 static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state
*ps
,
100 ret
= __mv88e6xxx_reg_read(ps
->bus
, ps
->sw_addr
, addr
, reg
);
104 dev_dbg(ps
->dev
, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
110 int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state
*ps
, int addr
, int reg
)
114 mutex_lock(&ps
->smi_mutex
);
115 ret
= _mv88e6xxx_reg_read(ps
, addr
, reg
);
116 mutex_unlock(&ps
->smi_mutex
);
121 static int __mv88e6xxx_reg_write(struct mii_bus
*bus
, int sw_addr
, int addr
,
127 return mdiobus_write_nested(bus
, addr
, reg
, val
);
129 /* Wait for the bus to become free. */
130 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
134 /* Transmit the data to write. */
135 ret
= mdiobus_write_nested(bus
, sw_addr
, SMI_DATA
, val
);
139 /* Transmit the write command. */
140 ret
= mdiobus_write_nested(bus
, sw_addr
, SMI_CMD
,
141 SMI_CMD_OP_22_WRITE
| (addr
<< 5) | reg
);
145 /* Wait for the write command to complete. */
146 ret
= mv88e6xxx_reg_wait_ready(bus
, sw_addr
);
153 static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state
*ps
, int addr
,
158 dev_dbg(ps
->dev
, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
161 return __mv88e6xxx_reg_write(ps
->bus
, ps
->sw_addr
, addr
, reg
, val
);
164 int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state
*ps
, int addr
,
169 mutex_lock(&ps
->smi_mutex
);
170 ret
= _mv88e6xxx_reg_write(ps
, addr
, reg
, val
);
171 mutex_unlock(&ps
->smi_mutex
);
176 int mv88e6xxx_set_addr_direct(struct dsa_switch
*ds
, u8
*addr
)
178 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
181 err
= mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_MAC_01
,
182 (addr
[0] << 8) | addr
[1]);
186 err
= mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_MAC_23
,
187 (addr
[2] << 8) | addr
[3]);
191 return mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_MAC_45
,
192 (addr
[4] << 8) | addr
[5]);
195 int mv88e6xxx_set_addr_indirect(struct dsa_switch
*ds
, u8
*addr
)
197 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
201 for (i
= 0; i
< 6; i
++) {
204 /* Write the MAC address byte. */
205 ret
= mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_SWITCH_MAC
,
206 GLOBAL2_SWITCH_MAC_BUSY
|
211 /* Wait for the write to complete. */
212 for (j
= 0; j
< 16; j
++) {
213 ret
= mv88e6xxx_reg_read(ps
, REG_GLOBAL2
,
218 if ((ret
& GLOBAL2_SWITCH_MAC_BUSY
) == 0)
/* Read a PHY register through the direct register map.
 * NOTE(review): reconstructed from a garbled extraction; upstream guards
 * against a negative (absent) PHY address — confirm against the original.
 */
static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr,
			       int regnum)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_read(ps, addr, regnum);
	return 0xffff;
}
236 static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state
*ps
, int addr
,
240 return _mv88e6xxx_reg_write(ps
, addr
, regnum
, val
);
244 static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state
*ps
)
247 unsigned long timeout
;
249 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_CONTROL
);
253 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_CONTROL
,
254 ret
& ~GLOBAL_CONTROL_PPU_ENABLE
);
258 timeout
= jiffies
+ 1 * HZ
;
259 while (time_before(jiffies
, timeout
)) {
260 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_STATUS
);
264 usleep_range(1000, 2000);
265 if ((ret
& GLOBAL_STATUS_PPU_MASK
) !=
266 GLOBAL_STATUS_PPU_POLLING
)
273 static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state
*ps
)
276 unsigned long timeout
;
278 ret
= mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_CONTROL
);
282 err
= mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_CONTROL
,
283 ret
| GLOBAL_CONTROL_PPU_ENABLE
);
287 timeout
= jiffies
+ 1 * HZ
;
288 while (time_before(jiffies
, timeout
)) {
289 ret
= mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_STATUS
);
293 usleep_range(1000, 2000);
294 if ((ret
& GLOBAL_STATUS_PPU_MASK
) ==
295 GLOBAL_STATUS_PPU_POLLING
)
302 static void mv88e6xxx_ppu_reenable_work(struct work_struct
*ugly
)
304 struct mv88e6xxx_priv_state
*ps
;
306 ps
= container_of(ugly
, struct mv88e6xxx_priv_state
, ppu_work
);
307 if (mutex_trylock(&ps
->ppu_mutex
)) {
308 if (mv88e6xxx_ppu_enable(ps
) == 0)
309 ps
->ppu_disabled
= 0;
310 mutex_unlock(&ps
->ppu_mutex
);
314 static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps
)
316 struct mv88e6xxx_priv_state
*ps
= (void *)_ps
;
318 schedule_work(&ps
->ppu_work
);
321 static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state
*ps
)
325 mutex_lock(&ps
->ppu_mutex
);
327 /* If the PHY polling unit is enabled, disable it so that
328 * we can access the PHY registers. If it was already
329 * disabled, cancel the timer that is going to re-enable
332 if (!ps
->ppu_disabled
) {
333 ret
= mv88e6xxx_ppu_disable(ps
);
335 mutex_unlock(&ps
->ppu_mutex
);
338 ps
->ppu_disabled
= 1;
340 del_timer(&ps
->ppu_timer
);
347 static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state
*ps
)
349 /* Schedule a timer to re-enable the PHY polling unit. */
350 mod_timer(&ps
->ppu_timer
, jiffies
+ msecs_to_jiffies(10));
351 mutex_unlock(&ps
->ppu_mutex
);
354 void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state
*ps
)
356 mutex_init(&ps
->ppu_mutex
);
357 INIT_WORK(&ps
->ppu_work
, mv88e6xxx_ppu_reenable_work
);
358 init_timer(&ps
->ppu_timer
);
359 ps
->ppu_timer
.data
= (unsigned long)ps
;
360 ps
->ppu_timer
.function
= mv88e6xxx_ppu_reenable_timer
;
/* PHY register read for chips whose PHYs are only reachable while the
 * PPU is paused.
 */
static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
				  int regnum)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ps);
	if (ret >= 0) {
		ret = _mv88e6xxx_reg_read(ps, addr, regnum);
		mv88e6xxx_ppu_access_put(ps);
	}

	return ret;
}
377 static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state
*ps
, int addr
,
382 ret
= mv88e6xxx_ppu_access_get(ps
);
384 ret
= _mv88e6xxx_reg_write(ps
, addr
, regnum
, val
);
385 mv88e6xxx_ppu_access_put(ps
);
391 static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state
*ps
)
393 return ps
->info
->family
== MV88E6XXX_FAMILY_6065
;
396 static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state
*ps
)
398 return ps
->info
->family
== MV88E6XXX_FAMILY_6095
;
401 static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state
*ps
)
403 return ps
->info
->family
== MV88E6XXX_FAMILY_6097
;
406 static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state
*ps
)
408 return ps
->info
->family
== MV88E6XXX_FAMILY_6165
;
411 static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state
*ps
)
413 return ps
->info
->family
== MV88E6XXX_FAMILY_6185
;
416 static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state
*ps
)
418 return ps
->info
->family
== MV88E6XXX_FAMILY_6320
;
421 static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state
*ps
)
423 return ps
->info
->family
== MV88E6XXX_FAMILY_6351
;
426 static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state
*ps
)
428 return ps
->info
->family
== MV88E6XXX_FAMILY_6352
;
431 static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state
*ps
)
433 return ps
->info
->num_databases
;
436 static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state
*ps
)
438 /* Does the device have dedicated FID registers for ATU and VTU ops? */
439 if (mv88e6xxx_6097_family(ps
) || mv88e6xxx_6165_family(ps
) ||
440 mv88e6xxx_6351_family(ps
) || mv88e6xxx_6352_family(ps
))
446 static bool mv88e6xxx_has_stu(struct mv88e6xxx_priv_state
*ps
)
448 /* Does the device have STU and dedicated SID registers for VTU ops? */
449 if (mv88e6xxx_6097_family(ps
) || mv88e6xxx_6165_family(ps
) ||
450 mv88e6xxx_6351_family(ps
) || mv88e6xxx_6352_family(ps
))
456 /* We expect the switch to perform auto negotiation if there is a real
457 * phy. However, in the case of a fixed link phy, we force the port
458 * settings from the fixed link settings.
460 void mv88e6xxx_adjust_link(struct dsa_switch
*ds
, int port
,
461 struct phy_device
*phydev
)
463 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
467 if (!phy_is_pseudo_fixed_link(phydev
))
470 mutex_lock(&ps
->smi_mutex
);
472 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_PCS_CTRL
);
476 reg
= ret
& ~(PORT_PCS_CTRL_LINK_UP
|
477 PORT_PCS_CTRL_FORCE_LINK
|
478 PORT_PCS_CTRL_DUPLEX_FULL
|
479 PORT_PCS_CTRL_FORCE_DUPLEX
|
480 PORT_PCS_CTRL_UNFORCED
);
482 reg
|= PORT_PCS_CTRL_FORCE_LINK
;
484 reg
|= PORT_PCS_CTRL_LINK_UP
;
486 if (mv88e6xxx_6065_family(ps
) && phydev
->speed
> SPEED_100
)
489 switch (phydev
->speed
) {
491 reg
|= PORT_PCS_CTRL_1000
;
494 reg
|= PORT_PCS_CTRL_100
;
497 reg
|= PORT_PCS_CTRL_10
;
500 pr_info("Unknown speed");
504 reg
|= PORT_PCS_CTRL_FORCE_DUPLEX
;
505 if (phydev
->duplex
== DUPLEX_FULL
)
506 reg
|= PORT_PCS_CTRL_DUPLEX_FULL
;
508 if ((mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
)) &&
509 (port
>= ps
->info
->num_ports
- 2)) {
510 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII_RXID
)
511 reg
|= PORT_PCS_CTRL_RGMII_DELAY_RXCLK
;
512 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII_TXID
)
513 reg
|= PORT_PCS_CTRL_RGMII_DELAY_TXCLK
;
514 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII_ID
)
515 reg
|= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK
|
516 PORT_PCS_CTRL_RGMII_DELAY_TXCLK
);
518 _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_PCS_CTRL
, reg
);
521 mutex_unlock(&ps
->smi_mutex
);
524 static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state
*ps
)
529 for (i
= 0; i
< 10; i
++) {
530 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_STATS_OP
);
531 if ((ret
& GLOBAL_STATS_OP_BUSY
) == 0)
538 static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state
*ps
,
543 if (mv88e6xxx_6320_family(ps
) || mv88e6xxx_6352_family(ps
))
544 port
= (port
+ 1) << 5;
546 /* Snapshot the hardware statistics counters for this port. */
547 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_STATS_OP
,
548 GLOBAL_STATS_OP_CAPTURE_PORT
|
549 GLOBAL_STATS_OP_HIST_RX_TX
| port
);
553 /* Wait for the snapshotting to complete. */
554 ret
= _mv88e6xxx_stats_wait(ps
);
561 static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state
*ps
,
569 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_STATS_OP
,
570 GLOBAL_STATS_OP_READ_CAPTURED
|
571 GLOBAL_STATS_OP_HIST_RX_TX
| stat
);
575 ret
= _mv88e6xxx_stats_wait(ps
);
579 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_STATS_COUNTER_32
);
585 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_STATS_COUNTER_01
);
592 static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats
[] = {
593 { "in_good_octets", 8, 0x00, BANK0
, },
594 { "in_bad_octets", 4, 0x02, BANK0
, },
595 { "in_unicast", 4, 0x04, BANK0
, },
596 { "in_broadcasts", 4, 0x06, BANK0
, },
597 { "in_multicasts", 4, 0x07, BANK0
, },
598 { "in_pause", 4, 0x16, BANK0
, },
599 { "in_undersize", 4, 0x18, BANK0
, },
600 { "in_fragments", 4, 0x19, BANK0
, },
601 { "in_oversize", 4, 0x1a, BANK0
, },
602 { "in_jabber", 4, 0x1b, BANK0
, },
603 { "in_rx_error", 4, 0x1c, BANK0
, },
604 { "in_fcs_error", 4, 0x1d, BANK0
, },
605 { "out_octets", 8, 0x0e, BANK0
, },
606 { "out_unicast", 4, 0x10, BANK0
, },
607 { "out_broadcasts", 4, 0x13, BANK0
, },
608 { "out_multicasts", 4, 0x12, BANK0
, },
609 { "out_pause", 4, 0x15, BANK0
, },
610 { "excessive", 4, 0x11, BANK0
, },
611 { "collisions", 4, 0x1e, BANK0
, },
612 { "deferred", 4, 0x05, BANK0
, },
613 { "single", 4, 0x14, BANK0
, },
614 { "multiple", 4, 0x17, BANK0
, },
615 { "out_fcs_error", 4, 0x03, BANK0
, },
616 { "late", 4, 0x1f, BANK0
, },
617 { "hist_64bytes", 4, 0x08, BANK0
, },
618 { "hist_65_127bytes", 4, 0x09, BANK0
, },
619 { "hist_128_255bytes", 4, 0x0a, BANK0
, },
620 { "hist_256_511bytes", 4, 0x0b, BANK0
, },
621 { "hist_512_1023bytes", 4, 0x0c, BANK0
, },
622 { "hist_1024_max_bytes", 4, 0x0d, BANK0
, },
623 { "sw_in_discards", 4, 0x10, PORT
, },
624 { "sw_in_filtered", 2, 0x12, PORT
, },
625 { "sw_out_filtered", 2, 0x13, PORT
, },
626 { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
627 { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
628 { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
629 { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
630 { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
631 { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
632 { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
633 { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
634 { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
635 { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
636 { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
637 { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
638 { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
639 { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
640 { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
641 { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
642 { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
643 { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
644 { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
645 { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
646 { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
647 { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
648 { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
649 { "out_octets_a", 4, 0x1a | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
650 { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
651 { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1
, BANK1
, },
654 static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state
*ps
,
655 struct mv88e6xxx_hw_stat
*stat
)
657 switch (stat
->type
) {
661 return mv88e6xxx_6320_family(ps
);
663 return mv88e6xxx_6095_family(ps
) ||
664 mv88e6xxx_6185_family(ps
) ||
665 mv88e6xxx_6097_family(ps
) ||
666 mv88e6xxx_6165_family(ps
) ||
667 mv88e6xxx_6351_family(ps
) ||
668 mv88e6xxx_6352_family(ps
);
673 static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state
*ps
,
674 struct mv88e6xxx_hw_stat
*s
,
684 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), s
->reg
);
689 if (s
->sizeof_stat
== 4) {
690 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
),
699 _mv88e6xxx_stats_read(ps
, s
->reg
, &low
);
700 if (s
->sizeof_stat
== 8)
701 _mv88e6xxx_stats_read(ps
, s
->reg
+ 1, &high
);
703 value
= (((u64
)high
) << 16) | low
;
707 void mv88e6xxx_get_strings(struct dsa_switch
*ds
, int port
, uint8_t *data
)
709 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
710 struct mv88e6xxx_hw_stat
*stat
;
713 for (i
= 0, j
= 0; i
< ARRAY_SIZE(mv88e6xxx_hw_stats
); i
++) {
714 stat
= &mv88e6xxx_hw_stats
[i
];
715 if (mv88e6xxx_has_stat(ps
, stat
)) {
716 memcpy(data
+ j
* ETH_GSTRING_LEN
, stat
->string
,
723 int mv88e6xxx_get_sset_count(struct dsa_switch
*ds
)
725 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
726 struct mv88e6xxx_hw_stat
*stat
;
729 for (i
= 0, j
= 0; i
< ARRAY_SIZE(mv88e6xxx_hw_stats
); i
++) {
730 stat
= &mv88e6xxx_hw_stats
[i
];
731 if (mv88e6xxx_has_stat(ps
, stat
))
738 mv88e6xxx_get_ethtool_stats(struct dsa_switch
*ds
,
739 int port
, uint64_t *data
)
741 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
742 struct mv88e6xxx_hw_stat
*stat
;
746 mutex_lock(&ps
->smi_mutex
);
748 ret
= _mv88e6xxx_stats_snapshot(ps
, port
);
750 mutex_unlock(&ps
->smi_mutex
);
753 for (i
= 0, j
= 0; i
< ARRAY_SIZE(mv88e6xxx_hw_stats
); i
++) {
754 stat
= &mv88e6xxx_hw_stats
[i
];
755 if (mv88e6xxx_has_stat(ps
, stat
)) {
756 data
[j
] = _mv88e6xxx_get_ethtool_stat(ps
, stat
, port
);
761 mutex_unlock(&ps
->smi_mutex
);
764 int mv88e6xxx_get_regs_len(struct dsa_switch
*ds
, int port
)
766 return 32 * sizeof(u16
);
769 void mv88e6xxx_get_regs(struct dsa_switch
*ds
, int port
,
770 struct ethtool_regs
*regs
, void *_p
)
772 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
778 memset(p
, 0xff, 32 * sizeof(u16
));
780 for (i
= 0; i
< 32; i
++) {
783 ret
= mv88e6xxx_reg_read(ps
, REG_PORT(port
), i
);
789 static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state
*ps
, int reg
, int offset
,
792 unsigned long timeout
= jiffies
+ HZ
/ 10;
794 while (time_before(jiffies
, timeout
)) {
797 ret
= _mv88e6xxx_reg_read(ps
, reg
, offset
);
803 usleep_range(1000, 2000);
808 static int mv88e6xxx_wait(struct mv88e6xxx_priv_state
*ps
, int reg
,
809 int offset
, u16 mask
)
813 mutex_lock(&ps
->smi_mutex
);
814 ret
= _mv88e6xxx_wait(ps
, reg
, offset
, mask
);
815 mutex_unlock(&ps
->smi_mutex
);
820 static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state
*ps
)
822 return _mv88e6xxx_wait(ps
, REG_GLOBAL2
, GLOBAL2_SMI_OP
,
823 GLOBAL2_SMI_OP_BUSY
);
826 static int mv88e6xxx_eeprom_load_wait(struct dsa_switch
*ds
)
828 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
830 return mv88e6xxx_wait(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
,
831 GLOBAL2_EEPROM_OP_LOAD
);
834 static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch
*ds
)
836 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
838 return mv88e6xxx_wait(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
,
839 GLOBAL2_EEPROM_OP_BUSY
);
842 static int mv88e6xxx_read_eeprom_word(struct dsa_switch
*ds
, int addr
)
844 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
847 mutex_lock(&ps
->eeprom_mutex
);
849 ret
= mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
,
850 GLOBAL2_EEPROM_OP_READ
|
851 (addr
& GLOBAL2_EEPROM_OP_ADDR_MASK
));
855 ret
= mv88e6xxx_eeprom_busy_wait(ds
);
859 ret
= mv88e6xxx_reg_read(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_DATA
);
861 mutex_unlock(&ps
->eeprom_mutex
);
865 int mv88e6xxx_get_eeprom(struct dsa_switch
*ds
, struct ethtool_eeprom
*eeprom
,
868 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
873 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_EEPROM
))
876 offset
= eeprom
->offset
;
880 eeprom
->magic
= 0xc3ec4951;
882 ret
= mv88e6xxx_eeprom_load_wait(ds
);
889 word
= mv88e6xxx_read_eeprom_word(ds
, offset
>> 1);
893 *data
++ = (word
>> 8) & 0xff;
903 word
= mv88e6xxx_read_eeprom_word(ds
, offset
>> 1);
907 *data
++ = word
& 0xff;
908 *data
++ = (word
>> 8) & 0xff;
918 word
= mv88e6xxx_read_eeprom_word(ds
, offset
>> 1);
922 *data
++ = word
& 0xff;
932 static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch
*ds
)
934 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
937 ret
= mv88e6xxx_reg_read(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
);
941 if (!(ret
& GLOBAL2_EEPROM_OP_WRITE_EN
))
947 static int mv88e6xxx_write_eeprom_word(struct dsa_switch
*ds
, int addr
,
950 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
953 mutex_lock(&ps
->eeprom_mutex
);
955 ret
= mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_DATA
, data
);
959 ret
= mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_EEPROM_OP
,
960 GLOBAL2_EEPROM_OP_WRITE
|
961 (addr
& GLOBAL2_EEPROM_OP_ADDR_MASK
));
965 ret
= mv88e6xxx_eeprom_busy_wait(ds
);
967 mutex_unlock(&ps
->eeprom_mutex
);
971 int mv88e6xxx_set_eeprom(struct dsa_switch
*ds
, struct ethtool_eeprom
*eeprom
,
974 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
979 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_EEPROM
))
982 if (eeprom
->magic
!= 0xc3ec4951)
985 ret
= mv88e6xxx_eeprom_is_readonly(ds
);
989 offset
= eeprom
->offset
;
993 ret
= mv88e6xxx_eeprom_load_wait(ds
);
1000 word
= mv88e6xxx_read_eeprom_word(ds
, offset
>> 1);
1004 word
= (*data
++ << 8) | (word
& 0xff);
1006 ret
= mv88e6xxx_write_eeprom_word(ds
, offset
>> 1, word
);
1019 word
|= *data
++ << 8;
1021 ret
= mv88e6xxx_write_eeprom_word(ds
, offset
>> 1, word
);
1033 word
= mv88e6xxx_read_eeprom_word(ds
, offset
>> 1);
1037 word
= (word
& 0xff00) | *data
++;
1039 ret
= mv88e6xxx_write_eeprom_word(ds
, offset
>> 1, word
);
1051 static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state
*ps
)
1053 return _mv88e6xxx_wait(ps
, REG_GLOBAL
, GLOBAL_ATU_OP
,
1054 GLOBAL_ATU_OP_BUSY
);
1057 static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state
*ps
,
1058 int addr
, int regnum
)
1062 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_SMI_OP
,
1063 GLOBAL2_SMI_OP_22_READ
| (addr
<< 5) |
1068 ret
= _mv88e6xxx_phy_wait(ps
);
1072 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL2
, GLOBAL2_SMI_DATA
);
1077 static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state
*ps
,
1078 int addr
, int regnum
, u16 val
)
1082 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_SMI_DATA
, val
);
1086 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_SMI_OP
,
1087 GLOBAL2_SMI_OP_22_WRITE
| (addr
<< 5) |
1090 return _mv88e6xxx_phy_wait(ps
);
1093 int mv88e6xxx_get_eee(struct dsa_switch
*ds
, int port
, struct ethtool_eee
*e
)
1095 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1098 mutex_lock(&ps
->smi_mutex
);
1100 reg
= _mv88e6xxx_phy_read_indirect(ps
, port
, 16);
1104 e
->eee_enabled
= !!(reg
& 0x0200);
1105 e
->tx_lpi_enabled
= !!(reg
& 0x0100);
1107 reg
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_STATUS
);
1111 e
->eee_active
= !!(reg
& PORT_STATUS_EEE
);
1115 mutex_unlock(&ps
->smi_mutex
);
1119 int mv88e6xxx_set_eee(struct dsa_switch
*ds
, int port
,
1120 struct phy_device
*phydev
, struct ethtool_eee
*e
)
1122 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1126 mutex_lock(&ps
->smi_mutex
);
1128 ret
= _mv88e6xxx_phy_read_indirect(ps
, port
, 16);
1132 reg
= ret
& ~0x0300;
1135 if (e
->tx_lpi_enabled
)
1138 ret
= _mv88e6xxx_phy_write_indirect(ps
, port
, 16, reg
);
1140 mutex_unlock(&ps
->smi_mutex
);
1145 static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state
*ps
, u16 fid
, u16 cmd
)
1149 if (mv88e6xxx_has_fid_reg(ps
)) {
1150 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_ATU_FID
, fid
);
1153 } else if (mv88e6xxx_num_databases(ps
) == 256) {
1154 /* ATU DBNum[7:4] are located in ATU Control 15:12 */
1155 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_ATU_CONTROL
);
1159 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_ATU_CONTROL
,
1161 ((fid
<< 8) & 0xf000));
1165 /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
1169 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_ATU_OP
, cmd
);
1173 return _mv88e6xxx_atu_wait(ps
);
1176 static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state
*ps
,
1177 struct mv88e6xxx_atu_entry
*entry
)
1179 u16 data
= entry
->state
& GLOBAL_ATU_DATA_STATE_MASK
;
1181 if (entry
->state
!= GLOBAL_ATU_DATA_STATE_UNUSED
) {
1182 unsigned int mask
, shift
;
1185 data
|= GLOBAL_ATU_DATA_TRUNK
;
1186 mask
= GLOBAL_ATU_DATA_TRUNK_ID_MASK
;
1187 shift
= GLOBAL_ATU_DATA_TRUNK_ID_SHIFT
;
1189 mask
= GLOBAL_ATU_DATA_PORT_VECTOR_MASK
;
1190 shift
= GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT
;
1193 data
|= (entry
->portv_trunkid
<< shift
) & mask
;
1196 return _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_ATU_DATA
, data
);
1199 static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state
*ps
,
1200 struct mv88e6xxx_atu_entry
*entry
,
1206 err
= _mv88e6xxx_atu_wait(ps
);
1210 err
= _mv88e6xxx_atu_data_write(ps
, entry
);
1215 op
= static_too
? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB
:
1216 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB
;
1218 op
= static_too
? GLOBAL_ATU_OP_FLUSH_MOVE_ALL
:
1219 GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC
;
1222 return _mv88e6xxx_atu_cmd(ps
, entry
->fid
, op
);
1225 static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state
*ps
,
1226 u16 fid
, bool static_too
)
1228 struct mv88e6xxx_atu_entry entry
= {
1230 .state
= 0, /* EntryState bits must be 0 */
1233 return _mv88e6xxx_atu_flush_move(ps
, &entry
, static_too
);
1236 static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state
*ps
, u16 fid
,
1237 int from_port
, int to_port
, bool static_too
)
1239 struct mv88e6xxx_atu_entry entry
= {
1244 /* EntryState bits must be 0xF */
1245 entry
.state
= GLOBAL_ATU_DATA_STATE_MASK
;
1247 /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
1248 entry
.portv_trunkid
= (to_port
& 0x0f) << 4;
1249 entry
.portv_trunkid
|= from_port
& 0x0f;
1251 return _mv88e6xxx_atu_flush_move(ps
, &entry
, static_too
);
1254 static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state
*ps
, u16 fid
,
1255 int port
, bool static_too
)
1257 /* Destination port 0xF means remove the entries */
1258 return _mv88e6xxx_atu_move(ps
, fid
, port
, 0x0f, static_too
);
1261 static const char * const mv88e6xxx_port_state_names
[] = {
1262 [PORT_CONTROL_STATE_DISABLED
] = "Disabled",
1263 [PORT_CONTROL_STATE_BLOCKING
] = "Blocking/Listening",
1264 [PORT_CONTROL_STATE_LEARNING
] = "Learning",
1265 [PORT_CONTROL_STATE_FORWARDING
] = "Forwarding",
1268 static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state
*ps
, int port
,
1271 struct dsa_switch
*ds
= ps
->ds
;
1275 reg
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_CONTROL
);
1279 oldstate
= reg
& PORT_CONTROL_STATE_MASK
;
1281 if (oldstate
!= state
) {
1282 /* Flush forwarding database if we're moving a port
1283 * from Learning or Forwarding state to Disabled or
1284 * Blocking or Listening state.
1286 if ((oldstate
== PORT_CONTROL_STATE_LEARNING
||
1287 oldstate
== PORT_CONTROL_STATE_FORWARDING
)
1288 && (state
== PORT_CONTROL_STATE_DISABLED
||
1289 state
== PORT_CONTROL_STATE_BLOCKING
)) {
1290 ret
= _mv88e6xxx_atu_remove(ps
, 0, port
, false);
1295 reg
= (reg
& ~PORT_CONTROL_STATE_MASK
) | state
;
1296 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_CONTROL
,
1301 netdev_dbg(ds
->ports
[port
], "PortState %s (was %s)\n",
1302 mv88e6xxx_port_state_names
[state
],
1303 mv88e6xxx_port_state_names
[oldstate
]);
1309 static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state
*ps
,
1312 struct net_device
*bridge
= ps
->ports
[port
].bridge_dev
;
1313 const u16 mask
= (1 << ps
->info
->num_ports
) - 1;
1314 struct dsa_switch
*ds
= ps
->ds
;
1315 u16 output_ports
= 0;
1319 /* allow CPU port or DSA link(s) to send frames to every port */
1320 if (dsa_is_cpu_port(ds
, port
) || dsa_is_dsa_port(ds
, port
)) {
1321 output_ports
= mask
;
1323 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
1324 /* allow sending frames to every group member */
1325 if (bridge
&& ps
->ports
[i
].bridge_dev
== bridge
)
1326 output_ports
|= BIT(i
);
1328 /* allow sending frames to CPU port and DSA link(s) */
1329 if (dsa_is_cpu_port(ds
, i
) || dsa_is_dsa_port(ds
, i
))
1330 output_ports
|= BIT(i
);
1334 /* prevent frames from going back out of the port they came in on */
1335 output_ports
&= ~BIT(port
);
1337 reg
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_BASE_VLAN
);
1342 reg
|= output_ports
& mask
;
1344 return _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_BASE_VLAN
, reg
);
1347 void mv88e6xxx_port_stp_state_set(struct dsa_switch
*ds
, int port
, u8 state
)
1349 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1353 case BR_STATE_DISABLED
:
1354 stp_state
= PORT_CONTROL_STATE_DISABLED
;
1356 case BR_STATE_BLOCKING
:
1357 case BR_STATE_LISTENING
:
1358 stp_state
= PORT_CONTROL_STATE_BLOCKING
;
1360 case BR_STATE_LEARNING
:
1361 stp_state
= PORT_CONTROL_STATE_LEARNING
;
1363 case BR_STATE_FORWARDING
:
1365 stp_state
= PORT_CONTROL_STATE_FORWARDING
;
1369 /* mv88e6xxx_port_stp_state_set may be called with softirqs disabled,
1370 * so we can not update the port state directly but need to schedule it.
1372 ps
->ports
[port
].state
= stp_state
;
1373 set_bit(port
, ps
->port_state_update_mask
);
1374 schedule_work(&ps
->bridge_work
);
1377 static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state
*ps
, int port
,
1380 struct dsa_switch
*ds
= ps
->ds
;
1384 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_DEFAULT_VLAN
);
1388 pvid
= ret
& PORT_DEFAULT_VLAN_MASK
;
1391 ret
&= ~PORT_DEFAULT_VLAN_MASK
;
1392 ret
|= *new & PORT_DEFAULT_VLAN_MASK
;
1394 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
1395 PORT_DEFAULT_VLAN
, ret
);
1399 netdev_dbg(ds
->ports
[port
], "DefaultVID %d (was %d)\n", *new,
1409 static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state
*ps
,
1410 int port
, u16
*pvid
)
1412 return _mv88e6xxx_port_pvid(ps
, port
, NULL
, pvid
);
1415 static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state
*ps
,
1418 return _mv88e6xxx_port_pvid(ps
, port
, &pvid
, NULL
);
1421 static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state
*ps
)
1423 return _mv88e6xxx_wait(ps
, REG_GLOBAL
, GLOBAL_VTU_OP
,
1424 GLOBAL_VTU_OP_BUSY
);
1427 static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state
*ps
, u16 op
)
1431 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_OP
, op
);
1435 return _mv88e6xxx_vtu_wait(ps
);
1438 static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state
*ps
)
1442 ret
= _mv88e6xxx_vtu_wait(ps
);
1446 return _mv88e6xxx_vtu_cmd(ps
, GLOBAL_VTU_OP_FLUSH_ALL
);
1449 static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state
*ps
,
1450 struct mv88e6xxx_vtu_stu_entry
*entry
,
1451 unsigned int nibble_offset
)
1457 for (i
= 0; i
< 3; ++i
) {
1458 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
,
1459 GLOBAL_VTU_DATA_0_3
+ i
);
1466 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
1467 unsigned int shift
= (i
% 4) * 4 + nibble_offset
;
1468 u16 reg
= regs
[i
/ 4];
1470 entry
->data
[i
] = (reg
>> shift
) & GLOBAL_VTU_STU_DATA_MASK
;
1476 static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state
*ps
,
1477 struct mv88e6xxx_vtu_stu_entry
*entry
,
1478 unsigned int nibble_offset
)
1480 u16 regs
[3] = { 0 };
1484 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
1485 unsigned int shift
= (i
% 4) * 4 + nibble_offset
;
1486 u8 data
= entry
->data
[i
];
1488 regs
[i
/ 4] |= (data
& GLOBAL_VTU_STU_DATA_MASK
) << shift
;
1491 for (i
= 0; i
< 3; ++i
) {
1492 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
,
1493 GLOBAL_VTU_DATA_0_3
+ i
, regs
[i
]);
1501 static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state
*ps
, u16 vid
)
1503 return _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_VID
,
1504 vid
& GLOBAL_VTU_VID_MASK
);
1507 static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state
*ps
,
1508 struct mv88e6xxx_vtu_stu_entry
*entry
)
1510 struct mv88e6xxx_vtu_stu_entry next
= { 0 };
1513 ret
= _mv88e6xxx_vtu_wait(ps
);
1517 ret
= _mv88e6xxx_vtu_cmd(ps
, GLOBAL_VTU_OP_VTU_GET_NEXT
);
1521 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_VTU_VID
);
1525 next
.vid
= ret
& GLOBAL_VTU_VID_MASK
;
1526 next
.valid
= !!(ret
& GLOBAL_VTU_VID_VALID
);
1529 ret
= _mv88e6xxx_vtu_stu_data_read(ps
, &next
, 0);
1533 if (mv88e6xxx_has_fid_reg(ps
)) {
1534 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
,
1539 next
.fid
= ret
& GLOBAL_VTU_FID_MASK
;
1540 } else if (mv88e6xxx_num_databases(ps
) == 256) {
1541 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1542 * VTU DBNum[3:0] are located in VTU Operation 3:0
1544 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
,
1549 next
.fid
= (ret
& 0xf00) >> 4;
1550 next
.fid
|= ret
& 0xf;
1553 if (mv88e6xxx_has_stu(ps
)) {
1554 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
,
1559 next
.sid
= ret
& GLOBAL_VTU_SID_MASK
;
1567 int mv88e6xxx_port_vlan_dump(struct dsa_switch
*ds
, int port
,
1568 struct switchdev_obj_port_vlan
*vlan
,
1569 int (*cb
)(struct switchdev_obj
*obj
))
1571 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1572 struct mv88e6xxx_vtu_stu_entry next
;
1576 mutex_lock(&ps
->smi_mutex
);
1578 err
= _mv88e6xxx_port_pvid_get(ps
, port
, &pvid
);
1582 err
= _mv88e6xxx_vtu_vid_write(ps
, GLOBAL_VTU_VID_MASK
);
1587 err
= _mv88e6xxx_vtu_getnext(ps
, &next
);
1594 if (next
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
)
1597 /* reinit and dump this VLAN obj */
1598 vlan
->vid_begin
= vlan
->vid_end
= next
.vid
;
1601 if (next
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED
)
1602 vlan
->flags
|= BRIDGE_VLAN_INFO_UNTAGGED
;
1604 if (next
.vid
== pvid
)
1605 vlan
->flags
|= BRIDGE_VLAN_INFO_PVID
;
1607 err
= cb(&vlan
->obj
);
1610 } while (next
.vid
< GLOBAL_VTU_VID_MASK
);
1613 mutex_unlock(&ps
->smi_mutex
);
1618 static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state
*ps
,
1619 struct mv88e6xxx_vtu_stu_entry
*entry
)
1621 u16 op
= GLOBAL_VTU_OP_VTU_LOAD_PURGE
;
1625 ret
= _mv88e6xxx_vtu_wait(ps
);
1632 /* Write port member tags */
1633 ret
= _mv88e6xxx_vtu_stu_data_write(ps
, entry
, 0);
1637 if (mv88e6xxx_has_stu(ps
)) {
1638 reg
= entry
->sid
& GLOBAL_VTU_SID_MASK
;
1639 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_SID
, reg
);
1644 if (mv88e6xxx_has_fid_reg(ps
)) {
1645 reg
= entry
->fid
& GLOBAL_VTU_FID_MASK
;
1646 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_FID
, reg
);
1649 } else if (mv88e6xxx_num_databases(ps
) == 256) {
1650 /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
1651 * VTU DBNum[3:0] are located in VTU Operation 3:0
1653 op
|= (entry
->fid
& 0xf0) << 8;
1654 op
|= entry
->fid
& 0xf;
1657 reg
= GLOBAL_VTU_VID_VALID
;
1659 reg
|= entry
->vid
& GLOBAL_VTU_VID_MASK
;
1660 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_VID
, reg
);
1664 return _mv88e6xxx_vtu_cmd(ps
, op
);
1667 static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state
*ps
, u8 sid
,
1668 struct mv88e6xxx_vtu_stu_entry
*entry
)
1670 struct mv88e6xxx_vtu_stu_entry next
= { 0 };
1673 ret
= _mv88e6xxx_vtu_wait(ps
);
1677 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_SID
,
1678 sid
& GLOBAL_VTU_SID_MASK
);
1682 ret
= _mv88e6xxx_vtu_cmd(ps
, GLOBAL_VTU_OP_STU_GET_NEXT
);
1686 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_VTU_SID
);
1690 next
.sid
= ret
& GLOBAL_VTU_SID_MASK
;
1692 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_VTU_VID
);
1696 next
.valid
= !!(ret
& GLOBAL_VTU_VID_VALID
);
1699 ret
= _mv88e6xxx_vtu_stu_data_read(ps
, &next
, 2);
1708 static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state
*ps
,
1709 struct mv88e6xxx_vtu_stu_entry
*entry
)
1714 ret
= _mv88e6xxx_vtu_wait(ps
);
1721 /* Write port states */
1722 ret
= _mv88e6xxx_vtu_stu_data_write(ps
, entry
, 2);
1726 reg
= GLOBAL_VTU_VID_VALID
;
1728 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_VID
, reg
);
1732 reg
= entry
->sid
& GLOBAL_VTU_SID_MASK
;
1733 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_VTU_SID
, reg
);
1737 return _mv88e6xxx_vtu_cmd(ps
, GLOBAL_VTU_OP_STU_LOAD_PURGE
);
1740 static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state
*ps
, int port
,
1743 struct dsa_switch
*ds
= ps
->ds
;
1748 if (mv88e6xxx_num_databases(ps
) == 4096)
1750 else if (mv88e6xxx_num_databases(ps
) == 256)
1755 /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
1756 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_BASE_VLAN
);
1760 fid
= (ret
& PORT_BASE_VLAN_FID_3_0_MASK
) >> 12;
1763 ret
&= ~PORT_BASE_VLAN_FID_3_0_MASK
;
1764 ret
|= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK
;
1766 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_BASE_VLAN
,
1772 /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
1773 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_CONTROL_1
);
1777 fid
|= (ret
& upper_mask
) << 4;
1781 ret
|= (*new >> 4) & upper_mask
;
1783 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_CONTROL_1
,
1788 netdev_dbg(ds
->ports
[port
], "FID %d (was %d)\n", *new, fid
);
1797 static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state
*ps
,
1800 return _mv88e6xxx_port_fid(ps
, port
, NULL
, fid
);
1803 static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state
*ps
,
1806 return _mv88e6xxx_port_fid(ps
, port
, &fid
, NULL
);
1809 static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state
*ps
, u16
*fid
)
1811 DECLARE_BITMAP(fid_bitmap
, MV88E6XXX_N_FID
);
1812 struct mv88e6xxx_vtu_stu_entry vlan
;
1815 bitmap_zero(fid_bitmap
, MV88E6XXX_N_FID
);
1817 /* Set every FID bit used by the (un)bridged ports */
1818 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
1819 err
= _mv88e6xxx_port_fid_get(ps
, i
, fid
);
1823 set_bit(*fid
, fid_bitmap
);
1826 /* Set every FID bit used by the VLAN entries */
1827 err
= _mv88e6xxx_vtu_vid_write(ps
, GLOBAL_VTU_VID_MASK
);
1832 err
= _mv88e6xxx_vtu_getnext(ps
, &vlan
);
1839 set_bit(vlan
.fid
, fid_bitmap
);
1840 } while (vlan
.vid
< GLOBAL_VTU_VID_MASK
);
1842 /* The reset value 0x000 is used to indicate that multiple address
1843 * databases are not needed. Return the next positive available.
1845 *fid
= find_next_zero_bit(fid_bitmap
, MV88E6XXX_N_FID
, 1);
1846 if (unlikely(*fid
>= mv88e6xxx_num_databases(ps
)))
1849 /* Clear the database */
1850 return _mv88e6xxx_atu_flush(ps
, *fid
, true);
1853 static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state
*ps
, u16 vid
,
1854 struct mv88e6xxx_vtu_stu_entry
*entry
)
1856 struct dsa_switch
*ds
= ps
->ds
;
1857 struct mv88e6xxx_vtu_stu_entry vlan
= {
1863 err
= _mv88e6xxx_fid_new(ps
, &vlan
.fid
);
1867 /* exclude all ports except the CPU and DSA ports */
1868 for (i
= 0; i
< ps
->info
->num_ports
; ++i
)
1869 vlan
.data
[i
] = dsa_is_cpu_port(ds
, i
) || dsa_is_dsa_port(ds
, i
)
1870 ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
1871 : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
;
1873 if (mv88e6xxx_6097_family(ps
) || mv88e6xxx_6165_family(ps
) ||
1874 mv88e6xxx_6351_family(ps
) || mv88e6xxx_6352_family(ps
)) {
1875 struct mv88e6xxx_vtu_stu_entry vstp
;
1877 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1878 * implemented, only one STU entry is needed to cover all VTU
1879 * entries. Thus, validate the SID 0.
1882 err
= _mv88e6xxx_stu_getnext(ps
, GLOBAL_VTU_SID_MASK
, &vstp
);
1886 if (vstp
.sid
!= vlan
.sid
|| !vstp
.valid
) {
1887 memset(&vstp
, 0, sizeof(vstp
));
1889 vstp
.sid
= vlan
.sid
;
1891 err
= _mv88e6xxx_stu_loadpurge(ps
, &vstp
);
1901 static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state
*ps
, u16 vid
,
1902 struct mv88e6xxx_vtu_stu_entry
*entry
, bool creat
)
1909 err
= _mv88e6xxx_vtu_vid_write(ps
, vid
- 1);
1913 err
= _mv88e6xxx_vtu_getnext(ps
, entry
);
1917 if (entry
->vid
!= vid
|| !entry
->valid
) {
1920 /* -ENOENT would've been more appropriate, but switchdev expects
1921 * -EOPNOTSUPP to inform bridge about an eventual software VLAN.
1924 err
= _mv88e6xxx_vtu_new(ps
, vid
, entry
);
1930 static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch
*ds
, int port
,
1931 u16 vid_begin
, u16 vid_end
)
1933 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1934 struct mv88e6xxx_vtu_stu_entry vlan
;
1940 mutex_lock(&ps
->smi_mutex
);
1942 err
= _mv88e6xxx_vtu_vid_write(ps
, vid_begin
- 1);
1947 err
= _mv88e6xxx_vtu_getnext(ps
, &vlan
);
1954 if (vlan
.vid
> vid_end
)
1957 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
1958 if (dsa_is_dsa_port(ds
, i
) || dsa_is_cpu_port(ds
, i
))
1962 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
)
1965 if (ps
->ports
[i
].bridge_dev
==
1966 ps
->ports
[port
].bridge_dev
)
1967 break; /* same bridge, check next VLAN */
1969 netdev_warn(ds
->ports
[port
],
1970 "hardware VLAN %d already used by %s\n",
1972 netdev_name(ps
->ports
[i
].bridge_dev
));
1976 } while (vlan
.vid
< vid_end
);
1979 mutex_unlock(&ps
->smi_mutex
);
1984 static const char * const mv88e6xxx_port_8021q_mode_names
[] = {
1985 [PORT_CONTROL_2_8021Q_DISABLED
] = "Disabled",
1986 [PORT_CONTROL_2_8021Q_FALLBACK
] = "Fallback",
1987 [PORT_CONTROL_2_8021Q_CHECK
] = "Check",
1988 [PORT_CONTROL_2_8021Q_SECURE
] = "Secure",
1991 int mv88e6xxx_port_vlan_filtering(struct dsa_switch
*ds
, int port
,
1992 bool vlan_filtering
)
1994 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
1995 u16 old
, new = vlan_filtering
? PORT_CONTROL_2_8021Q_SECURE
:
1996 PORT_CONTROL_2_8021Q_DISABLED
;
1999 mutex_lock(&ps
->smi_mutex
);
2001 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_CONTROL_2
);
2005 old
= ret
& PORT_CONTROL_2_8021Q_MASK
;
2008 ret
&= ~PORT_CONTROL_2_8021Q_MASK
;
2009 ret
|= new & PORT_CONTROL_2_8021Q_MASK
;
2011 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_CONTROL_2
,
2016 netdev_dbg(ds
->ports
[port
], "802.1Q Mode %s (was %s)\n",
2017 mv88e6xxx_port_8021q_mode_names
[new],
2018 mv88e6xxx_port_8021q_mode_names
[old
]);
2023 mutex_unlock(&ps
->smi_mutex
);
2028 int mv88e6xxx_port_vlan_prepare(struct dsa_switch
*ds
, int port
,
2029 const struct switchdev_obj_port_vlan
*vlan
,
2030 struct switchdev_trans
*trans
)
2034 /* If the requested port doesn't belong to the same bridge as the VLAN
2035 * members, do not support it (yet) and fallback to software VLAN.
2037 err
= mv88e6xxx_port_check_hw_vlan(ds
, port
, vlan
->vid_begin
,
2042 /* We don't need any dynamic resource from the kernel (yet),
2043 * so skip the prepare phase.
2048 static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state
*ps
, int port
,
2049 u16 vid
, bool untagged
)
2051 struct mv88e6xxx_vtu_stu_entry vlan
;
2054 err
= _mv88e6xxx_vtu_get(ps
, vid
, &vlan
, true);
2058 vlan
.data
[port
] = untagged
?
2059 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED
:
2060 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED
;
2062 return _mv88e6xxx_vtu_loadpurge(ps
, &vlan
);
2065 void mv88e6xxx_port_vlan_add(struct dsa_switch
*ds
, int port
,
2066 const struct switchdev_obj_port_vlan
*vlan
,
2067 struct switchdev_trans
*trans
)
2069 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2070 bool untagged
= vlan
->flags
& BRIDGE_VLAN_INFO_UNTAGGED
;
2071 bool pvid
= vlan
->flags
& BRIDGE_VLAN_INFO_PVID
;
2074 mutex_lock(&ps
->smi_mutex
);
2076 for (vid
= vlan
->vid_begin
; vid
<= vlan
->vid_end
; ++vid
)
2077 if (_mv88e6xxx_port_vlan_add(ps
, port
, vid
, untagged
))
2078 netdev_err(ds
->ports
[port
], "failed to add VLAN %d%c\n",
2079 vid
, untagged
? 'u' : 't');
2081 if (pvid
&& _mv88e6xxx_port_pvid_set(ps
, port
, vlan
->vid_end
))
2082 netdev_err(ds
->ports
[port
], "failed to set PVID %d\n",
2085 mutex_unlock(&ps
->smi_mutex
);
2088 static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state
*ps
,
2091 struct dsa_switch
*ds
= ps
->ds
;
2092 struct mv88e6xxx_vtu_stu_entry vlan
;
2095 err
= _mv88e6xxx_vtu_get(ps
, vid
, &vlan
, false);
2099 /* Tell switchdev if this VLAN is handled in software */
2100 if (vlan
.data
[port
] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
)
2103 vlan
.data
[port
] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
;
2105 /* keep the VLAN unless all ports are excluded */
2107 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
2108 if (dsa_is_cpu_port(ds
, i
) || dsa_is_dsa_port(ds
, i
))
2111 if (vlan
.data
[i
] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER
) {
2117 err
= _mv88e6xxx_vtu_loadpurge(ps
, &vlan
);
2121 return _mv88e6xxx_atu_remove(ps
, vlan
.fid
, port
, false);
2124 int mv88e6xxx_port_vlan_del(struct dsa_switch
*ds
, int port
,
2125 const struct switchdev_obj_port_vlan
*vlan
)
2127 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2131 mutex_lock(&ps
->smi_mutex
);
2133 err
= _mv88e6xxx_port_pvid_get(ps
, port
, &pvid
);
2137 for (vid
= vlan
->vid_begin
; vid
<= vlan
->vid_end
; ++vid
) {
2138 err
= _mv88e6xxx_port_vlan_del(ps
, port
, vid
);
2143 err
= _mv88e6xxx_port_pvid_set(ps
, port
, 0);
2150 mutex_unlock(&ps
->smi_mutex
);
2155 static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state
*ps
,
2156 const unsigned char *addr
)
2160 for (i
= 0; i
< 3; i
++) {
2161 ret
= _mv88e6xxx_reg_write(
2162 ps
, REG_GLOBAL
, GLOBAL_ATU_MAC_01
+ i
,
2163 (addr
[i
* 2] << 8) | addr
[i
* 2 + 1]);
2171 static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state
*ps
,
2172 unsigned char *addr
)
2176 for (i
= 0; i
< 3; i
++) {
2177 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
,
2178 GLOBAL_ATU_MAC_01
+ i
);
2181 addr
[i
* 2] = ret
>> 8;
2182 addr
[i
* 2 + 1] = ret
& 0xff;
2188 static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state
*ps
,
2189 struct mv88e6xxx_atu_entry
*entry
)
2193 ret
= _mv88e6xxx_atu_wait(ps
);
2197 ret
= _mv88e6xxx_atu_mac_write(ps
, entry
->mac
);
2201 ret
= _mv88e6xxx_atu_data_write(ps
, entry
);
2205 return _mv88e6xxx_atu_cmd(ps
, entry
->fid
, GLOBAL_ATU_OP_LOAD_DB
);
2208 static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state
*ps
, int port
,
2209 const unsigned char *addr
, u16 vid
,
2212 struct mv88e6xxx_atu_entry entry
= { 0 };
2213 struct mv88e6xxx_vtu_stu_entry vlan
;
2216 /* Null VLAN ID corresponds to the port private database */
2218 err
= _mv88e6xxx_port_fid_get(ps
, port
, &vlan
.fid
);
2220 err
= _mv88e6xxx_vtu_get(ps
, vid
, &vlan
, false);
2224 entry
.fid
= vlan
.fid
;
2225 entry
.state
= state
;
2226 ether_addr_copy(entry
.mac
, addr
);
2227 if (state
!= GLOBAL_ATU_DATA_STATE_UNUSED
) {
2228 entry
.trunk
= false;
2229 entry
.portv_trunkid
= BIT(port
);
2232 return _mv88e6xxx_atu_load(ps
, &entry
);
/* switchdev prepare-phase handler for FDB additions.  Nothing to
 * reserve: the hardware operation needs no dynamic kernel resources,
 * so the prepare phase is a no-op and always succeeds.
 */
int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_fdb *fdb,
			       struct switchdev_trans *trans)
{
	/* We don't need any dynamic resource from the kernel (yet),
	 * so skip the prepare phase.
	 */
	return 0;
}
2245 void mv88e6xxx_port_fdb_add(struct dsa_switch
*ds
, int port
,
2246 const struct switchdev_obj_port_fdb
*fdb
,
2247 struct switchdev_trans
*trans
)
2249 int state
= is_multicast_ether_addr(fdb
->addr
) ?
2250 GLOBAL_ATU_DATA_STATE_MC_STATIC
:
2251 GLOBAL_ATU_DATA_STATE_UC_STATIC
;
2252 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2254 mutex_lock(&ps
->smi_mutex
);
2255 if (_mv88e6xxx_port_fdb_load(ps
, port
, fdb
->addr
, fdb
->vid
, state
))
2256 netdev_err(ds
->ports
[port
], "failed to load MAC address\n");
2257 mutex_unlock(&ps
->smi_mutex
);
2260 int mv88e6xxx_port_fdb_del(struct dsa_switch
*ds
, int port
,
2261 const struct switchdev_obj_port_fdb
*fdb
)
2263 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2266 mutex_lock(&ps
->smi_mutex
);
2267 ret
= _mv88e6xxx_port_fdb_load(ps
, port
, fdb
->addr
, fdb
->vid
,
2268 GLOBAL_ATU_DATA_STATE_UNUSED
);
2269 mutex_unlock(&ps
->smi_mutex
);
2274 static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state
*ps
, u16 fid
,
2275 struct mv88e6xxx_atu_entry
*entry
)
2277 struct mv88e6xxx_atu_entry next
= { 0 };
2282 ret
= _mv88e6xxx_atu_wait(ps
);
2286 ret
= _mv88e6xxx_atu_cmd(ps
, fid
, GLOBAL_ATU_OP_GET_NEXT_DB
);
2290 ret
= _mv88e6xxx_atu_mac_read(ps
, next
.mac
);
2294 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, GLOBAL_ATU_DATA
);
2298 next
.state
= ret
& GLOBAL_ATU_DATA_STATE_MASK
;
2299 if (next
.state
!= GLOBAL_ATU_DATA_STATE_UNUSED
) {
2300 unsigned int mask
, shift
;
2302 if (ret
& GLOBAL_ATU_DATA_TRUNK
) {
2304 mask
= GLOBAL_ATU_DATA_TRUNK_ID_MASK
;
2305 shift
= GLOBAL_ATU_DATA_TRUNK_ID_SHIFT
;
2308 mask
= GLOBAL_ATU_DATA_PORT_VECTOR_MASK
;
2309 shift
= GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT
;
2312 next
.portv_trunkid
= (ret
& mask
) >> shift
;
2319 static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state
*ps
,
2320 u16 fid
, u16 vid
, int port
,
2321 struct switchdev_obj_port_fdb
*fdb
,
2322 int (*cb
)(struct switchdev_obj
*obj
))
2324 struct mv88e6xxx_atu_entry addr
= {
2325 .mac
= { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
2329 err
= _mv88e6xxx_atu_mac_write(ps
, addr
.mac
);
2334 err
= _mv88e6xxx_atu_getnext(ps
, fid
, &addr
);
2338 if (addr
.state
== GLOBAL_ATU_DATA_STATE_UNUSED
)
2341 if (!addr
.trunk
&& addr
.portv_trunkid
& BIT(port
)) {
2342 bool is_static
= addr
.state
==
2343 (is_multicast_ether_addr(addr
.mac
) ?
2344 GLOBAL_ATU_DATA_STATE_MC_STATIC
:
2345 GLOBAL_ATU_DATA_STATE_UC_STATIC
);
2348 ether_addr_copy(fdb
->addr
, addr
.mac
);
2349 fdb
->ndm_state
= is_static
? NUD_NOARP
: NUD_REACHABLE
;
2351 err
= cb(&fdb
->obj
);
2355 } while (!is_broadcast_ether_addr(addr
.mac
));
2360 int mv88e6xxx_port_fdb_dump(struct dsa_switch
*ds
, int port
,
2361 struct switchdev_obj_port_fdb
*fdb
,
2362 int (*cb
)(struct switchdev_obj
*obj
))
2364 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2365 struct mv88e6xxx_vtu_stu_entry vlan
= {
2366 .vid
= GLOBAL_VTU_VID_MASK
, /* all ones */
2371 mutex_lock(&ps
->smi_mutex
);
2373 /* Dump port's default Filtering Information Database (VLAN ID 0) */
2374 err
= _mv88e6xxx_port_fid_get(ps
, port
, &fid
);
2378 err
= _mv88e6xxx_port_fdb_dump_one(ps
, fid
, 0, port
, fdb
, cb
);
2382 /* Dump VLANs' Filtering Information Databases */
2383 err
= _mv88e6xxx_vtu_vid_write(ps
, vlan
.vid
);
2388 err
= _mv88e6xxx_vtu_getnext(ps
, &vlan
);
2395 err
= _mv88e6xxx_port_fdb_dump_one(ps
, vlan
.fid
, vlan
.vid
, port
,
2399 } while (vlan
.vid
< GLOBAL_VTU_VID_MASK
);
2402 mutex_unlock(&ps
->smi_mutex
);
2407 int mv88e6xxx_port_bridge_join(struct dsa_switch
*ds
, int port
,
2408 struct net_device
*bridge
)
2410 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2413 mutex_lock(&ps
->smi_mutex
);
2415 /* Assign the bridge and remap each port's VLANTable */
2416 ps
->ports
[port
].bridge_dev
= bridge
;
2418 for (i
= 0; i
< ps
->info
->num_ports
; ++i
) {
2419 if (ps
->ports
[i
].bridge_dev
== bridge
) {
2420 err
= _mv88e6xxx_port_based_vlan_map(ps
, i
);
2426 mutex_unlock(&ps
->smi_mutex
);
2431 void mv88e6xxx_port_bridge_leave(struct dsa_switch
*ds
, int port
)
2433 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2434 struct net_device
*bridge
= ps
->ports
[port
].bridge_dev
;
2437 mutex_lock(&ps
->smi_mutex
);
2439 /* Unassign the bridge and remap each port's VLANTable */
2440 ps
->ports
[port
].bridge_dev
= NULL
;
2442 for (i
= 0; i
< ps
->info
->num_ports
; ++i
)
2443 if (i
== port
|| ps
->ports
[i
].bridge_dev
== bridge
)
2444 if (_mv88e6xxx_port_based_vlan_map(ps
, i
))
2445 netdev_warn(ds
->ports
[i
], "failed to remap\n");
2447 mutex_unlock(&ps
->smi_mutex
);
2450 static void mv88e6xxx_bridge_work(struct work_struct
*work
)
2452 struct mv88e6xxx_priv_state
*ps
;
2453 struct dsa_switch
*ds
;
2456 ps
= container_of(work
, struct mv88e6xxx_priv_state
, bridge_work
);
2459 mutex_lock(&ps
->smi_mutex
);
2461 for (port
= 0; port
< ps
->info
->num_ports
; ++port
)
2462 if (test_and_clear_bit(port
, ps
->port_state_update_mask
) &&
2463 _mv88e6xxx_port_state(ps
, port
, ps
->ports
[port
].state
))
2464 netdev_warn(ds
->ports
[port
],
2465 "failed to update state to %s\n",
2466 mv88e6xxx_port_state_names
[ps
->ports
[port
].state
]);
2468 mutex_unlock(&ps
->smi_mutex
);
/* Write @val to register @reg on PHY page @page of @port's PHY.
 * Register 0x16 is the PHY's page-select register; it is restored to
 * page 0 on all exit paths (the restore's own return value is
 * intentionally ignored — the original error, if any, is reported).
 * Caller must hold smi_mutex.  Returns 0 or a negative error code.
 */
static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps,
				     int port, int page, int reg, int val)
{
	int ret;

	ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
	if (ret < 0)
		goto restore_page_0;

	ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val);
restore_page_0:
	_mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);

	return ret;
}
/* Read register @reg on PHY page @page of @port's PHY.  Selects the
 * page via register 0x16, performs the indirect read, and restores
 * page 0 on all exit paths (restore errors intentionally ignored).
 * Caller must hold smi_mutex.  Returns the register value (>= 0) or
 * a negative error code.
 */
static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps,
				    int port, int page, int reg)
{
	int ret;

	ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
	if (ret < 0)
		goto restore_page_0;

	ret = _mv88e6xxx_phy_read_indirect(ps, port, reg);
restore_page_0:
	_mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);

	return ret;
}
2503 static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state
*ps
)
2507 ret
= _mv88e6xxx_phy_page_read(ps
, REG_FIBER_SERDES
, PAGE_FIBER_SERDES
,
2512 if (ret
& BMCR_PDOWN
) {
2514 ret
= _mv88e6xxx_phy_page_write(ps
, REG_FIBER_SERDES
,
2515 PAGE_FIBER_SERDES
, MII_BMCR
,
2522 static int mv88e6xxx_setup_port(struct dsa_switch
*ds
, int port
)
2524 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2528 mutex_lock(&ps
->smi_mutex
);
2530 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2531 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2532 mv88e6xxx_6185_family(ps
) || mv88e6xxx_6095_family(ps
) ||
2533 mv88e6xxx_6065_family(ps
) || mv88e6xxx_6320_family(ps
)) {
2534 /* MAC Forcing register: don't force link, speed,
2535 * duplex or flow control state to any particular
2536 * values on physical ports, but force the CPU port
2537 * and all DSA ports to their maximum bandwidth and
2540 reg
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_PCS_CTRL
);
2541 if (dsa_is_cpu_port(ds
, port
) || dsa_is_dsa_port(ds
, port
)) {
2542 reg
&= ~PORT_PCS_CTRL_UNFORCED
;
2543 reg
|= PORT_PCS_CTRL_FORCE_LINK
|
2544 PORT_PCS_CTRL_LINK_UP
|
2545 PORT_PCS_CTRL_DUPLEX_FULL
|
2546 PORT_PCS_CTRL_FORCE_DUPLEX
;
2547 if (mv88e6xxx_6065_family(ps
))
2548 reg
|= PORT_PCS_CTRL_100
;
2550 reg
|= PORT_PCS_CTRL_1000
;
2552 reg
|= PORT_PCS_CTRL_UNFORCED
;
2555 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2556 PORT_PCS_CTRL
, reg
);
2561 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2562 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2563 * tunneling, determine priority by looking at 802.1p and IP
2564 * priority fields (IP prio has precedence), and set STP state
2567 * If this is the CPU link, use DSA or EDSA tagging depending
2568 * on which tagging mode was configured.
2570 * If this is a link to another switch, use DSA tagging mode.
2572 * If this is the upstream port for this switch, enable
2573 * forwarding of unknown unicasts and multicasts.
2576 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2577 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2578 mv88e6xxx_6095_family(ps
) || mv88e6xxx_6065_family(ps
) ||
2579 mv88e6xxx_6185_family(ps
) || mv88e6xxx_6320_family(ps
))
2580 reg
= PORT_CONTROL_IGMP_MLD_SNOOP
|
2581 PORT_CONTROL_USE_TAG
| PORT_CONTROL_USE_IP
|
2582 PORT_CONTROL_STATE_FORWARDING
;
2583 if (dsa_is_cpu_port(ds
, port
)) {
2584 if (mv88e6xxx_6095_family(ps
) || mv88e6xxx_6185_family(ps
))
2585 reg
|= PORT_CONTROL_DSA_TAG
;
2586 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2587 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2588 mv88e6xxx_6320_family(ps
)) {
2589 if (ds
->dst
->tag_protocol
== DSA_TAG_PROTO_EDSA
)
2590 reg
|= PORT_CONTROL_FRAME_ETHER_TYPE_DSA
;
2592 reg
|= PORT_CONTROL_FRAME_MODE_DSA
;
2593 reg
|= PORT_CONTROL_FORWARD_UNKNOWN
|
2594 PORT_CONTROL_FORWARD_UNKNOWN_MC
;
2597 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2598 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2599 mv88e6xxx_6095_family(ps
) || mv88e6xxx_6065_family(ps
) ||
2600 mv88e6xxx_6185_family(ps
) || mv88e6xxx_6320_family(ps
)) {
2601 if (ds
->dst
->tag_protocol
== DSA_TAG_PROTO_EDSA
)
2602 reg
|= PORT_CONTROL_EGRESS_ADD_TAG
;
2605 if (dsa_is_dsa_port(ds
, port
)) {
2606 if (mv88e6xxx_6095_family(ps
) || mv88e6xxx_6185_family(ps
))
2607 reg
|= PORT_CONTROL_DSA_TAG
;
2608 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2609 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2610 mv88e6xxx_6320_family(ps
)) {
2611 reg
|= PORT_CONTROL_FRAME_MODE_DSA
;
2614 if (port
== dsa_upstream_port(ds
))
2615 reg
|= PORT_CONTROL_FORWARD_UNKNOWN
|
2616 PORT_CONTROL_FORWARD_UNKNOWN_MC
;
2619 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2625 /* If this port is connected to a SerDes, make sure the SerDes is not
2628 if (mv88e6xxx_6352_family(ps
)) {
2629 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(port
), PORT_STATUS
);
2632 ret
&= PORT_STATUS_CMODE_MASK
;
2633 if ((ret
== PORT_STATUS_CMODE_100BASE_X
) ||
2634 (ret
== PORT_STATUS_CMODE_1000BASE_X
) ||
2635 (ret
== PORT_STATUS_CMODE_SGMII
)) {
2636 ret
= mv88e6xxx_power_on_serdes(ps
);
2642 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2643 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
2644 * untagged frames on this port, do a destination address lookup on all
2645 * received packets as usual, disable ARP mirroring and don't send a
2646 * copy of all transmitted/received frames on this port to the CPU.
2649 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2650 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2651 mv88e6xxx_6095_family(ps
) || mv88e6xxx_6320_family(ps
) ||
2652 mv88e6xxx_6185_family(ps
))
2653 reg
= PORT_CONTROL_2_MAP_DA
;
2655 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2656 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6320_family(ps
))
2657 reg
|= PORT_CONTROL_2_JUMBO_10240
;
2659 if (mv88e6xxx_6095_family(ps
) || mv88e6xxx_6185_family(ps
)) {
2660 /* Set the upstream port this port should use */
2661 reg
|= dsa_upstream_port(ds
);
2662 /* enable forwarding of unknown multicast addresses to
2665 if (port
== dsa_upstream_port(ds
))
2666 reg
|= PORT_CONTROL_2_FORWARD_UNKNOWN
;
2669 reg
|= PORT_CONTROL_2_8021Q_DISABLED
;
2672 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2673 PORT_CONTROL_2
, reg
);
2678 /* Port Association Vector: when learning source addresses
2679 * of packets, add the address to the address database using
2680 * a port bitmap that has only the bit for this port set and
2681 * the other bits clear.
2684 /* Disable learning for CPU port */
2685 if (dsa_is_cpu_port(ds
, port
))
2688 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_ASSOC_VECTOR
, reg
);
2692 /* Egress rate control 2: disable egress rate control. */
2693 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_RATE_CONTROL_2
,
2698 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2699 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2700 mv88e6xxx_6320_family(ps
)) {
2701 /* Do not limit the period of time that this port can
2702 * be paused for by the remote end or the period of
2703 * time that this port can pause the remote end.
2705 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2706 PORT_PAUSE_CTRL
, 0x0000);
2710 /* Port ATU control: disable limiting the number of
2711 * address database entries that this port is allowed
2714 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2715 PORT_ATU_CONTROL
, 0x0000);
2716 /* Priority Override: disable DA, SA and VTU priority
2719 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2720 PORT_PRI_OVERRIDE
, 0x0000);
2724 /* Port Ethertype: use the Ethertype DSA Ethertype
2727 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2728 PORT_ETH_TYPE
, ETH_P_EDSA
);
2731 /* Tag Remap: use an identity 802.1p prio -> switch
2734 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2735 PORT_TAG_REGMAP_0123
, 0x3210);
2739 /* Tag Remap 2: use an identity 802.1p prio -> switch
2742 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2743 PORT_TAG_REGMAP_4567
, 0x7654);
2748 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2749 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2750 mv88e6xxx_6185_family(ps
) || mv88e6xxx_6095_family(ps
) ||
2751 mv88e6xxx_6320_family(ps
)) {
2752 /* Rate Control: disable ingress rate limiting. */
2753 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
),
2754 PORT_RATE_CONTROL
, 0x0001);
2759 /* Port Control 1: disable trunking, disable sending
2760 * learning messages to this port.
2762 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_CONTROL_1
, 0x0000);
2766 /* Port based VLAN map: give each port the same default address
2767 * database, and allow bidirectional communication between the
2768 * CPU and DSA port(s), and the other ports.
2770 ret
= _mv88e6xxx_port_fid_set(ps
, port
, 0);
2774 ret
= _mv88e6xxx_port_based_vlan_map(ps
, port
);
2778 /* Default VLAN ID and priority: don't set a default VLAN
2779 * ID, and set the default packet priority to zero.
2781 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(port
), PORT_DEFAULT_VLAN
,
2784 mutex_unlock(&ps
->smi_mutex
);
2788 int mv88e6xxx_setup_ports(struct dsa_switch
*ds
)
2790 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2794 for (i
= 0; i
< ps
->info
->num_ports
; i
++) {
2795 ret
= mv88e6xxx_setup_port(ds
, i
);
2802 int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state
*ps
)
2804 mutex_init(&ps
->smi_mutex
);
2806 INIT_WORK(&ps
->bridge_work
, mv88e6xxx_bridge_work
);
2808 if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_EEPROM
))
2809 mutex_init(&ps
->eeprom_mutex
);
2811 if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_PPU
))
2812 mv88e6xxx_ppu_state_init(ps
);
2817 int mv88e6xxx_setup_global(struct dsa_switch
*ds
)
2819 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
2823 mutex_lock(&ps
->smi_mutex
);
2824 /* Set the default address aging time to 5 minutes, and
2825 * enable address learn messages to be sent to all message
2828 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_ATU_CONTROL
,
2829 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL
);
2833 /* Configure the IP ToS mapping registers. */
2834 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_0
, 0x0000);
2837 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_1
, 0x0000);
2840 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_2
, 0x5555);
2843 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_3
, 0x5555);
2846 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_4
, 0xaaaa);
2849 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_5
, 0xaaaa);
2852 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_6
, 0xffff);
2855 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IP_PRI_7
, 0xffff);
2859 /* Configure the IEEE 802.1p priority mapping register. */
2860 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_IEEE_PRI
, 0xfa41);
2864 /* Send all frames with destination addresses matching
2865 * 01:80:c2:00:00:0x to the CPU port.
2867 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_MGMT_EN_0X
, 0xffff);
2871 /* Ignore removed tag data on doubly tagged packets, disable
2872 * flow control messages, force flow control priority to the
2873 * highest, and send all special multicast frames to the CPU
2874 * port at the highest priority.
2876 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_SWITCH_MGMT
,
2877 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU
| 0x70 |
2878 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI
);
2882 /* Program the DSA routing table. */
2883 for (i
= 0; i
< 32; i
++) {
2886 if (ds
->pd
->rtable
&&
2887 i
!= ds
->index
&& i
< ds
->dst
->pd
->nr_chips
)
2888 nexthop
= ds
->pd
->rtable
[i
] & 0x1f;
2890 err
= _mv88e6xxx_reg_write(
2892 GLOBAL2_DEVICE_MAPPING
,
2893 GLOBAL2_DEVICE_MAPPING_UPDATE
|
2894 (i
<< GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT
) | nexthop
);
2899 /* Clear all trunk masks. */
2900 for (i
= 0; i
< 8; i
++) {
2901 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
, GLOBAL2_TRUNK_MASK
,
2903 (i
<< GLOBAL2_TRUNK_MASK_NUM_SHIFT
) |
2904 ((1 << ps
->info
->num_ports
) - 1));
2909 /* Clear all trunk mappings. */
2910 for (i
= 0; i
< 16; i
++) {
2911 err
= _mv88e6xxx_reg_write(
2913 GLOBAL2_TRUNK_MAPPING
,
2914 GLOBAL2_TRUNK_MAPPING_UPDATE
|
2915 (i
<< GLOBAL2_TRUNK_MAPPING_ID_SHIFT
));
2920 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2921 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2922 mv88e6xxx_6320_family(ps
)) {
2923 /* Send all frames with destination addresses matching
2924 * 01:80:c2:00:00:2x to the CPU port.
2926 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
,
2927 GLOBAL2_MGMT_EN_2X
, 0xffff);
2931 /* Initialise cross-chip port VLAN table to reset
2934 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
,
2935 GLOBAL2_PVT_ADDR
, 0x9000);
2939 /* Clear the priority override table. */
2940 for (i
= 0; i
< 16; i
++) {
2941 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
,
2942 GLOBAL2_PRIO_OVERRIDE
,
2949 if (mv88e6xxx_6352_family(ps
) || mv88e6xxx_6351_family(ps
) ||
2950 mv88e6xxx_6165_family(ps
) || mv88e6xxx_6097_family(ps
) ||
2951 mv88e6xxx_6185_family(ps
) || mv88e6xxx_6095_family(ps
) ||
2952 mv88e6xxx_6320_family(ps
)) {
2953 /* Disable ingress rate limiting by resetting all
2954 * ingress rate limit registers to their initial
2957 for (i
= 0; i
< ps
->info
->num_ports
; i
++) {
2958 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL2
,
2966 /* Clear the statistics counters for all ports */
2967 err
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, GLOBAL_STATS_OP
,
2968 GLOBAL_STATS_OP_FLUSH_ALL
);
2972 /* Wait for the flush to complete. */
2973 err
= _mv88e6xxx_stats_wait(ps
);
2977 /* Clear all ATU entries */
2978 err
= _mv88e6xxx_atu_flush(ps
, 0, true);
2982 /* Clear all the VTU and STU entries */
2983 err
= _mv88e6xxx_vtu_stu_flush(ps
);
2985 mutex_unlock(&ps
->smi_mutex
);
2990 int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state
*ps
, bool ppu_active
)
2992 u16 is_reset
= (ppu_active
? 0x8800 : 0xc800);
2993 struct gpio_desc
*gpiod
= ps
->ds
->pd
->reset
;
2994 unsigned long timeout
;
2998 mutex_lock(&ps
->smi_mutex
);
3000 /* Set all ports to the disabled state. */
3001 for (i
= 0; i
< ps
->info
->num_ports
; i
++) {
3002 ret
= _mv88e6xxx_reg_read(ps
, REG_PORT(i
), PORT_CONTROL
);
3006 ret
= _mv88e6xxx_reg_write(ps
, REG_PORT(i
), PORT_CONTROL
,
3012 /* Wait for transmit queues to drain. */
3013 usleep_range(2000, 4000);
3015 /* If there is a gpio connected to the reset pin, toggle it */
3017 gpiod_set_value_cansleep(gpiod
, 1);
3018 usleep_range(10000, 20000);
3019 gpiod_set_value_cansleep(gpiod
, 0);
3020 usleep_range(10000, 20000);
3023 /* Reset the switch. Keep the PPU active if requested. The PPU
3024 * needs to be active to support indirect phy register access
3025 * through global registers 0x18 and 0x19.
3028 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, 0x04, 0xc000);
3030 ret
= _mv88e6xxx_reg_write(ps
, REG_GLOBAL
, 0x04, 0xc400);
3034 /* Wait up to one second for reset to complete. */
3035 timeout
= jiffies
+ 1 * HZ
;
3036 while (time_before(jiffies
, timeout
)) {
3037 ret
= _mv88e6xxx_reg_read(ps
, REG_GLOBAL
, 0x00);
3041 if ((ret
& is_reset
) == is_reset
)
3043 usleep_range(1000, 2000);
3045 if (time_after(jiffies
, timeout
))
3050 mutex_unlock(&ps
->smi_mutex
);
3055 int mv88e6xxx_phy_page_read(struct dsa_switch
*ds
, int port
, int page
, int reg
)
3057 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3060 mutex_lock(&ps
->smi_mutex
);
3061 ret
= _mv88e6xxx_phy_page_read(ps
, port
, page
, reg
);
3062 mutex_unlock(&ps
->smi_mutex
);
3067 int mv88e6xxx_phy_page_write(struct dsa_switch
*ds
, int port
, int page
,
3070 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3073 mutex_lock(&ps
->smi_mutex
);
3074 ret
= _mv88e6xxx_phy_page_write(ps
, port
, page
, reg
, val
);
3075 mutex_unlock(&ps
->smi_mutex
);
3080 static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state
*ps
,
3083 if (port
>= 0 && port
< ps
->info
->num_ports
)
3089 mv88e6xxx_phy_read(struct dsa_switch
*ds
, int port
, int regnum
)
3091 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3092 int addr
= mv88e6xxx_port_to_phy_addr(ps
, port
);
3098 mutex_lock(&ps
->smi_mutex
);
3100 if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_PPU
))
3101 ret
= mv88e6xxx_phy_read_ppu(ps
, addr
, regnum
);
3102 else if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_SMI_PHY
))
3103 ret
= _mv88e6xxx_phy_read_indirect(ps
, addr
, regnum
);
3105 ret
= _mv88e6xxx_phy_read(ps
, addr
, regnum
);
3107 mutex_unlock(&ps
->smi_mutex
);
3112 mv88e6xxx_phy_write(struct dsa_switch
*ds
, int port
, int regnum
, u16 val
)
3114 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3115 int addr
= mv88e6xxx_port_to_phy_addr(ps
, port
);
3121 mutex_lock(&ps
->smi_mutex
);
3123 if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_PPU
))
3124 ret
= mv88e6xxx_phy_write_ppu(ps
, addr
, regnum
, val
);
3125 else if (mv88e6xxx_has(ps
, MV88E6XXX_FLAG_SMI_PHY
))
3126 ret
= _mv88e6xxx_phy_write_indirect(ps
, addr
, regnum
, val
);
3128 ret
= _mv88e6xxx_phy_write(ps
, addr
, regnum
, val
);
3130 mutex_unlock(&ps
->smi_mutex
);
3134 #ifdef CONFIG_NET_DSA_HWMON
3136 static int mv88e61xx_get_temp(struct dsa_switch
*ds
, int *temp
)
3138 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3144 mutex_lock(&ps
->smi_mutex
);
3146 ret
= _mv88e6xxx_phy_write(ps
, 0x0, 0x16, 0x6);
3150 /* Enable temperature sensor */
3151 ret
= _mv88e6xxx_phy_read(ps
, 0x0, 0x1a);
3155 ret
= _mv88e6xxx_phy_write(ps
, 0x0, 0x1a, ret
| (1 << 5));
3159 /* Wait for temperature to stabilize */
3160 usleep_range(10000, 12000);
3162 val
= _mv88e6xxx_phy_read(ps
, 0x0, 0x1a);
3168 /* Disable temperature sensor */
3169 ret
= _mv88e6xxx_phy_write(ps
, 0x0, 0x1a, ret
& ~(1 << 5));
3173 *temp
= ((val
& 0x1f) - 5) * 5;
3176 _mv88e6xxx_phy_write(ps
, 0x0, 0x16, 0x0);
3177 mutex_unlock(&ps
->smi_mutex
);
/* mv88e63xx_get_temp - read the die temperature on 88E63xx-style chips
 * @ds: dsa switch instance
 * @temp: output, temperature in degrees Celsius
 *
 * These chips expose the temperature directly in phy page 6 register 27;
 * 6320-family chips host the sensor on phy 3, others on phy 0.
 *
 * Returns 0 on success, or a negative error code.
 */
static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
	int ret;

	*temp = 0;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
	if (ret < 0)
		return ret;

	/* Low byte is the reading with a -25C offset. */
	*temp = (ret & 0xff) - 25;

	return 0;
}
3198 int mv88e6xxx_get_temp(struct dsa_switch
*ds
, int *temp
)
3200 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3202 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_TEMP
))
3205 if (mv88e6xxx_6320_family(ps
) || mv88e6xxx_6352_family(ps
))
3206 return mv88e63xx_get_temp(ds
, temp
);
3208 return mv88e61xx_get_temp(ds
, temp
);
3211 int mv88e6xxx_get_temp_limit(struct dsa_switch
*ds
, int *temp
)
3213 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3214 int phy
= mv88e6xxx_6320_family(ps
) ? 3 : 0;
3217 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_TEMP_LIMIT
))
3222 ret
= mv88e6xxx_phy_page_read(ds
, phy
, 6, 26);
3226 *temp
= (((ret
>> 8) & 0x1f) * 5) - 25;
3231 int mv88e6xxx_set_temp_limit(struct dsa_switch
*ds
, int temp
)
3233 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3234 int phy
= mv88e6xxx_6320_family(ps
) ? 3 : 0;
3237 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_TEMP_LIMIT
))
3240 ret
= mv88e6xxx_phy_page_read(ds
, phy
, 6, 26);
3243 temp
= clamp_val(DIV_ROUND_CLOSEST(temp
, 5) + 5, 0, 0x1f);
3244 return mv88e6xxx_phy_page_write(ds
, phy
, 6, 26,
3245 (ret
& 0xe0ff) | (temp
<< 8));
3248 int mv88e6xxx_get_temp_alarm(struct dsa_switch
*ds
, bool *alarm
)
3250 struct mv88e6xxx_priv_state
*ps
= ds_to_priv(ds
);
3251 int phy
= mv88e6xxx_6320_family(ps
) ? 3 : 0;
3254 if (!mv88e6xxx_has(ps
, MV88E6XXX_FLAG_TEMP_LIMIT
))
3259 ret
= mv88e6xxx_phy_page_read(ds
, phy
, 6, 26);
3263 *alarm
= !!(ret
& 0x40);
3267 #endif /* CONFIG_NET_DSA_HWMON */
3269 static const struct mv88e6xxx_info
*
3270 mv88e6xxx_lookup_info(unsigned int prod_num
, const struct mv88e6xxx_info
*table
,
3275 for (i
= 0; i
< num
; ++i
)
3276 if (table
[i
].prod_num
== prod_num
)
3282 const char *mv88e6xxx_drv_probe(struct device
*dsa_dev
, struct device
*host_dev
,
3283 int sw_addr
, void **priv
,
3284 const struct mv88e6xxx_info
*table
,
3287 const struct mv88e6xxx_info
*info
;
3288 struct mv88e6xxx_priv_state
*ps
;
3289 struct mii_bus
*bus
;
3291 int id
, prod_num
, rev
;
3293 bus
= dsa_host_dev_to_mii_bus(host_dev
);
3297 id
= __mv88e6xxx_reg_read(bus
, sw_addr
, REG_PORT(0), PORT_SWITCH_ID
);
3301 prod_num
= (id
& 0xfff0) >> 4;
3304 info
= mv88e6xxx_lookup_info(prod_num
, table
, num
);
3310 ps
= devm_kzalloc(dsa_dev
, sizeof(*ps
), GFP_KERNEL
);
3315 ps
->sw_addr
= sw_addr
;
3320 dev_info(&ps
->bus
->dev
, "switch 0x%x probed: %s, revision %u\n",
3321 prod_num
, name
, rev
);
3326 static int __init
mv88e6xxx_init(void)
3328 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
3329 register_switch_driver(&mv88e6131_switch_driver
);
3331 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
3332 register_switch_driver(&mv88e6123_switch_driver
);
3334 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
3335 register_switch_driver(&mv88e6352_switch_driver
);
3337 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
3338 register_switch_driver(&mv88e6171_switch_driver
);
3342 module_init(mv88e6xxx_init
);
3344 static void __exit
mv88e6xxx_cleanup(void)
3346 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
3347 unregister_switch_driver(&mv88e6171_switch_driver
);
3349 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
3350 unregister_switch_driver(&mv88e6352_switch_driver
);
3352 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
3353 unregister_switch_driver(&mv88e6123_switch_driver
);
3355 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
3356 unregister_switch_driver(&mv88e6131_switch_driver
);
3359 module_exit(mv88e6xxx_cleanup
);
3361 MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
3362 MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
3363 MODULE_LICENSE("GPL");