7d62802a66d591884f4381d06bfb9ca9aff302d7
[deliverable/linux.git] / drivers / net / dsa / bcm_sf2.c
1 /*
2 * Broadcom Starfighter 2 DSA switch driver
3 *
4 * Copyright (C) 2014, Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12 #include <linux/list.h>
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
15 #include <linux/interrupt.h>
16 #include <linux/platform_device.h>
17 #include <linux/of.h>
18 #include <linux/phy.h>
19 #include <linux/phy_fixed.h>
20 #include <linux/mii.h>
21 #include <linux/of.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_address.h>
24 #include <linux/of_net.h>
25 #include <net/dsa.h>
26 #include <linux/ethtool.h>
27 #include <linux/if_bridge.h>
28 #include <linux/brcmphy.h>
29 #include <linux/etherdevice.h>
30 #include <net/switchdev.h>
31
32 #include "bcm_sf2.h"
33 #include "bcm_sf2_regs.h"
34
35 /* String, offset, and register size in bytes if different from 4 bytes */
/* Per-port hardware MIB counters. Offsets are relative to the port's MIB
 * window (CORE_P_MIB_OFFSET(port)). Entries without an explicit third
 * field are 4-byte counters; 8 marks 64-bit latched counters.
 */
static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
	{ "TxOctets", 0x000, 8 },
	{ "TxDropPkts", 0x020 },
	{ "TxQPKTQ0", 0x030 },
	{ "TxBroadcastPkts", 0x040 },
	{ "TxMulticastPkts", 0x050 },
	{ "TxUnicastPKts", 0x060 },
	{ "TxCollisions", 0x070 },
	{ "TxSingleCollision", 0x080 },
	{ "TxMultipleCollision", 0x090 },
	{ "TxDeferredCollision", 0x0a0 },
	{ "TxLateCollision", 0x0b0 },
	{ "TxExcessiveCollision", 0x0c0 },
	{ "TxFrameInDisc", 0x0d0 },
	{ "TxPausePkts", 0x0e0 },
	{ "TxQPKTQ1", 0x0f0 },
	{ "TxQPKTQ2", 0x100 },
	{ "TxQPKTQ3", 0x110 },
	{ "TxQPKTQ4", 0x120 },
	{ "TxQPKTQ5", 0x130 },
	{ "RxOctets", 0x140, 8 },
	{ "RxUndersizePkts", 0x160 },
	{ "RxPausePkts", 0x170 },
	{ "RxPkts64Octets", 0x180 },
	{ "RxPkts65to127Octets", 0x190 },
	{ "RxPkts128to255Octets", 0x1a0 },
	{ "RxPkts256to511Octets", 0x1b0 },
	{ "RxPkts512to1023Octets", 0x1c0 },
	{ "RxPkts1024toMaxPktsOctets", 0x1d0 },
	{ "RxOversizePkts", 0x1e0 },
	{ "RxJabbers", 0x1f0 },
	{ "RxAlignmentErrors", 0x200 },
	{ "RxFCSErrors", 0x210 },
	{ "RxGoodOctets", 0x220, 8 },
	{ "RxDropPkts", 0x240 },
	{ "RxUnicastPkts", 0x250 },
	{ "RxMulticastPkts", 0x260 },
	{ "RxBroadcastPkts", 0x270 },
	{ "RxSAChanges", 0x280 },
	{ "RxFragments", 0x290 },
	{ "RxJumboPkt", 0x2a0 },
	{ "RxSymblErr", 0x2b0 },
	{ "InRangeErrCount", 0x2c0 },
	{ "OutRangeErrCount", 0x2d0 },
	{ "EEELpiEvent", 0x2e0 },
	{ "EEELpiDuration", 0x2f0 },
	{ "RxDiscard", 0x300, 8 },
	{ "TxQPKTQ6", 0x320 },
	{ "TxQPKTQ7", 0x330 },
	{ "TxPkts64Octets", 0x340 },
	{ "TxPkts65to127Octets", 0x350 },
	{ "TxPkts128to255Octets", 0x360 },
	{ "TxPkts256to511Ocets", 0x370 },
	{ "TxPkts512to1023Ocets", 0x380 },
	{ "TxPkts1024toMaxPktOcets", 0x390 },
};

/* Number of entries in the MIB table above */
#define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib)
94
95 static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
96 int port, uint8_t *data)
97 {
98 unsigned int i;
99
100 for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
101 memcpy(data + i * ETH_GSTRING_LEN,
102 bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
103 }
104
/* Read every hardware MIB counter for @port into @data, one u64 per
 * entry of bcm_sf2_mib[], in table order.
 */
static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
					 int port, uint64_t *data)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	const struct bcm_sf2_hw_stats *s;
	unsigned int i;
	u64 val = 0;
	u32 offset;

	/* Serialize MIB access against other readers */
	mutex_lock(&priv->stats_mutex);

	/* Now fetch the per-port counters */
	for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
		s = &bcm_sf2_mib[i];

		/* Do a latched 64-bit read if needed */
		offset = s->reg + CORE_P_MIB_OFFSET(port);
		if (s->sizeof_stat == 8)
			val = core_readq(priv, offset);
		else
			val = core_readl(priv, offset);

		data[i] = (u64)val;
	}

	mutex_unlock(&priv->stats_mutex);
}
132
133 static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
134 {
135 return BCM_SF2_STATS_SIZE;
136 }
137
138 static char *bcm_sf2_sw_probe(struct device *dsa_dev, struct device *host_dev,
139 int sw_addr, void **_priv)
140 {
141 struct bcm_sf2_priv *priv;
142
143 priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
144 if (!priv)
145 return NULL;
146 *_priv = priv;
147
148 return "Broadcom Starfighter 2";
149 }
150
/* Add the IMP (CPU) port to each enabled port's VLAN control register so
 * that Port i and the IMP port can exchange traffic.
 */
static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Enable the IMP Port to be in the same VLAN as the other ports
	 * on a per-port basis such that we only have Port i and IMP in
	 * the same VLAN.
	 */
	for (i = 0; i < priv->hw_params.num_ports; i++) {
		/* Skip ports not present in the platform port mask */
		if (!((1 << i) & ds->phys_port_mask))
			continue;

		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= (1 << cpu_port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
	}
}
170
/* Bring up the IMP (CPU-facing) port: power its memories, enable
 * forwarding, turn on Broadcom tags for it, and force its link up.
 * The register writes below follow the hardware's expected ordering.
 */
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg, val;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Resolve which bit controls the Broadcom tag; only ports 8, 7
	 * and 5 have a dedicated header-enable bit.
	 */
	switch (port) {
	case 8:
		val = BRCM_HDR_EN_P8;
		break;
	case 7:
		val = BRCM_HDR_EN_P7;
		break;
	case 5:
		val = BRCM_HDR_EN_P5;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable Broadcom tags for IMP port */
	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
	reg |= val;
	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

	/* Enable reception Broadcom tag for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);

	/* Force link status for IMP port */
	reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
}
235
236 static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
237 {
238 struct bcm_sf2_priv *priv = ds_to_priv(ds);
239 u32 reg;
240
241 reg = core_readl(priv, CORE_EEE_EN_CTRL);
242 if (enable)
243 reg |= 1 << port;
244 else
245 reg &= ~(1 << port);
246 core_writel(priv, reg, CORE_EEE_EN_CTRL);
247 }
248
/* Power the integrated GPHY up or down via REG_SPHY_CNTRL. The power-up
 * path pulses PHY_RESET around clearing the power-down bits; the 21 us
 * and 1 ms delays are hardware settling times — do not reorder the
 * register writes in this sequence.
 */
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		/* Assert reset while removing power-down/IDDQ/clock-gate */
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		/* Final write below releases the reset */
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		/* Final write below also gates the 25 MHz clock */
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		/* NOTE(review): runs only on the power-down path; looks
		 * like it hands LED control to the SPDLNK source — confirm
		 * against the REG_LED_CNTRL documentation.
		 */
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
277
/* Unmask the per-port interrupt sources for @port. Port 0 lives on
 * INTRL2 bank 0; all other ports are on bank 1, with port 7 using a
 * dedicated offset.
 */
static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}
298
/* Mask and acknowledge the per-port interrupt sources for @port,
 * mirroring bcm_sf2_port_intr_enable()'s bank selection.
 */
static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	/* Mask first, then clear any latched status */
	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}
321
/* Bring up a user-facing switch port: power its memories, clear the
 * RX/TX disables, re-enable the internal GPHY if this port uses it,
 * restore VLAN/bridge membership and EEE state. @phy may be NULL during
 * initial setup. Always returns 0.
 */
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set this port, and only this one to be in the default VLAN,
	 * if member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->port_sts[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}
377
/* Shut down a switch port: mask its interrupts, power down its GPHY if
 * applicable, set the RX/TX disable bits and power down the port
 * memories. Ports flagged for Wake-on-LAN are left untouched.
 */
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 off, reg;

	/* Keep WoL-enabled ports fully powered */
	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	/* The IMP port uses a different control register */
	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
407
/* Try to bring up EEE on @port through its PHY.
 *
 * Returns 0 if EEE was not enabled (phy_init_eee() reported failure),
 * or 1 otherwise.
 */
static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
			    struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	int ret;

	p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);

	/* A non-zero return means EEE cannot be used on this link */
	ret = phy_init_eee(phy, 0);
	if (ret)
		return 0;

	bcm_sf2_eee_enable_set(ds, port, true);

	return 1;
}
427
428 static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
429 struct ethtool_eee *e)
430 {
431 struct bcm_sf2_priv *priv = ds_to_priv(ds);
432 struct ethtool_eee *p = &priv->port_sts[port].eee;
433 u32 reg;
434
435 reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
436 e->eee_enabled = p->eee_enabled;
437 e->eee_active = !!(reg & (1 << port));
438
439 return 0;
440 }
441
442 static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
443 struct phy_device *phydev,
444 struct ethtool_eee *e)
445 {
446 struct bcm_sf2_priv *priv = ds_to_priv(ds);
447 struct ethtool_eee *p = &priv->port_sts[port].eee;
448
449 p->eee_enabled = e->eee_enabled;
450
451 if (!p->eee_enabled) {
452 bcm_sf2_eee_enable_set(ds, port, false);
453 } else {
454 p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
455 if (!p->eee_enabled)
456 return -EOPNOTSUPP;
457 }
458
459 return 0;
460 }
461
462 /* Fast-ageing of ARL entries for a given port, equivalent to an ARL
463 * flush for that port.
464 */
465 static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
466 {
467 struct bcm_sf2_priv *priv = ds_to_priv(ds);
468 unsigned int timeout = 1000;
469 u32 reg;
470
471 core_writel(priv, port, CORE_FAST_AGE_PORT);
472
473 reg = core_readl(priv, CORE_FAST_AGE_CTRL);
474 reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
475 core_writel(priv, reg, CORE_FAST_AGE_CTRL);
476
477 do {
478 reg = core_readl(priv, CORE_FAST_AGE_CTRL);
479 if (!(reg & FAST_AGE_STR_DONE))
480 break;
481
482 cpu_relax();
483 } while (timeout--);
484
485 if (!timeout)
486 return -ETIMEDOUT;
487
488 core_writel(priv, 0, CORE_FAST_AGE_CTRL);
489
490 return 0;
491 }
492
/* Join @port to @bridge: add this port to every other bridge member's
 * VLAN control register and vice versa, keeping the software caches
 * (vlan_ctl_mask) in sync. Always returns 0.
 */
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
			      struct net_device *bridge)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg, p_ctl;

	priv->port_sts[port].bridge_dev = bridge;
	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		/* Only other members of the same bridge are affected */
		if (priv->port_sts[i].bridge_dev != bridge)
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= 1 << port;
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		p_ctl |= 1 << i;
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}
526
527 static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
528 {
529 struct bcm_sf2_priv *priv = ds_to_priv(ds);
530 struct net_device *bridge = priv->port_sts[port].bridge_dev;
531 unsigned int i;
532 u32 reg, p_ctl;
533
534 p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
535
536 for (i = 0; i < priv->hw_params.num_ports; i++) {
537 /* Don't touch the remaining ports */
538 if (priv->port_sts[i].bridge_dev != bridge)
539 continue;
540
541 reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
542 reg &= ~(1 << port);
543 core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
544 priv->port_sts[port].vlan_ctl_mask = reg;
545
546 /* Prevent self removal to preserve isolation */
547 if (port != i)
548 p_ctl &= ~(1 << i);
549 }
550
551 core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
552 priv->port_sts[port].vlan_ctl_mask = p_ctl;
553 priv->port_sts[port].bridge_dev = NULL;
554 }
555
/* Translate a bridge STP state into the hardware G_MISTP state for
 * @port, fast-ageing the ARL when the port leaves a learning/forwarding
 * state for a non-learning one.
 */
static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
					u8 state)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u8 hw_state, cur_hw_state;
	u32 reg;

	/* NOTE(review): cur_hw_state keeps the shifted field while the
	 * G_MISTP_*_STATE constants below are compared to it directly —
	 * this is consistent only if G_MISTP_STATE_SHIFT is 0; confirm
	 * against bcm_sf2_regs.h.
	 */
	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = G_MISTP_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = G_MISTP_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = G_MISTP_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = G_MISTP_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = G_MISTP_BLOCK_STATE;
		break;
	default:
		pr_err("%s: invalid STP state: %d\n", __func__, state);
		return;
	}

	/* Fast-age ARL entries if we are moving a port from Learning or
	 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
	 * state (hw_state)
	 */
	if (cur_hw_state != hw_state) {
		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
		    hw_state <= G_MISTP_LISTEN_STATE) {
			if (bcm_sf2_sw_fast_age_port(ds, port)) {
				pr_err("%s: fast-ageing failed\n", __func__);
				return;
			}
		}
	}

	/* Program the new state into the port control register */
	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
	reg |= hw_state;
	core_writel(priv, reg, CORE_G_PCTL_PORT(port));
}
606
607 /* Address Resolution Logic routines */
608 static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
609 {
610 unsigned int timeout = 10;
611 u32 reg;
612
613 do {
614 reg = core_readl(priv, CORE_ARLA_RWCTL);
615 if (!(reg & ARL_STRTDN))
616 return 0;
617
618 usleep_range(1000, 2000);
619 } while (timeout--);
620
621 return -ETIMEDOUT;
622 }
623
624 static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
625 {
626 u32 cmd;
627
628 if (op > ARL_RW)
629 return -EINVAL;
630
631 cmd = core_readl(priv, CORE_ARLA_RWCTL);
632 cmd &= ~IVL_SVL_SELECT;
633 cmd |= ARL_STRTDN;
634 if (op)
635 cmd |= ARL_RW;
636 else
637 cmd &= ~ARL_RW;
638 core_writel(priv, cmd, CORE_ARLA_RWCTL);
639
640 return bcm_sf2_arl_op_wait(priv);
641 }
642
/* Scan the 4 ARL result bins for an entry matching the previously
 * programmed MAC/VID lookup. On a hit, *ent and *idx describe the bin.
 * With @is_valid false the caller is confirming a deletion instead.
 * Returns 0 on a match, -ENOENT otherwise.
 */
static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
			    u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
			    bool is_valid)
{
	unsigned int i;
	int ret;

	ret = bcm_sf2_arl_op_wait(priv);
	if (ret)
		return ret;

	/* Read the 4 bins */
	for (i = 0; i < 4; i++) {
		u64 mac_vid;
		u32 fwd_entry;

		mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
		fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
		bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);

		if (ent->is_valid && is_valid) {
			*idx = i;
			return 0;
		}

		/* This is the MAC we just deleted */
		/* NOTE(review): bitwise AND succeeds on any overlapping
		 * set bit rather than a full MAC compare — verify this
		 * is the intended match criterion.
		 */
		if (!is_valid && (mac_vid & mac))
			return 0;
	}

	return -ENOENT;
}
675
/* Perform an ARL operation for @addr/@vid on @port.
 *
 * @op: non-zero for a pure read (returns the lookup result); zero to
 *      insert (@is_valid true) or remove (@is_valid false) a static
 *      entry, followed by a read-back verification.
 * Returns 0 on success or a negative errno.
 */
static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
			  const unsigned char *addr, u16 vid, bool is_valid)
{
	struct bcm_sf2_arl_entry ent;
	u32 fwd_entry;
	u64 mac, mac_vid = 0;
	u8 idx = 0;
	int ret;

	/* Convert the array into a 64-bit MAC */
	mac = bcm_sf2_mac_to_u64(addr);

	/* Perform a read for the given MAC and VID */
	core_writeq(priv, mac, CORE_ARLA_MAC);
	core_writel(priv, vid, CORE_ARLA_VID);

	/* Issue a read operation for this MAC */
	ret = bcm_sf2_arl_rw_op(priv, 1);
	if (ret)
		return ret;

	ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
	/* If this is a read, just finish now */
	if (op)
		return ret;

	/* We could not find a matching MAC, so reset to a new entry */
	if (ret) {
		fwd_entry = 0;
		idx = 0;
	}

	/* Build a fresh static entry and program it into bin @idx */
	memset(&ent, 0, sizeof(ent));
	ent.port = port;
	ent.is_valid = is_valid;
	ent.vid = vid;
	ent.is_static = true;
	memcpy(ent.mac, addr, ETH_ALEN);
	bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);

	core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
	core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));

	/* Commit with a write-back operation */
	ret = bcm_sf2_arl_rw_op(priv, 0);
	if (ret)
		return ret;

	/* Re-read the entry to check */
	return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
}
726
/* switchdev prepare phase for FDB objects: nothing to pre-validate or
 * reserve yet, so unconditionally accept.
 */
static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
				  const struct switchdev_obj_port_fdb *fdb,
				  struct switchdev_trans *trans)
{
	return 0;
}
734
735 static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
736 const struct switchdev_obj_port_fdb *fdb,
737 struct switchdev_trans *trans)
738 {
739 struct bcm_sf2_priv *priv = ds_to_priv(ds);
740
741 if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
742 pr_err("%s: failed to add MAC address\n", __func__);
743 }
744
745 static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
746 const struct switchdev_obj_port_fdb *fdb)
747 {
748 struct bcm_sf2_priv *priv = ds_to_priv(ds);
749
750 return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
751 }
752
753 static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
754 {
755 unsigned timeout = 1000;
756 u32 reg;
757
758 do {
759 reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
760 if (!(reg & ARLA_SRCH_STDN))
761 return 0;
762
763 if (reg & ARLA_SRCH_VLID)
764 return 0;
765
766 usleep_range(1000, 2000);
767 } while (timeout--);
768
769 return -ETIMEDOUT;
770 }
771
772 static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
773 struct bcm_sf2_arl_entry *ent)
774 {
775 u64 mac_vid;
776 u32 fwd_entry;
777
778 mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
779 fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
780 bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
781 }
782
783 static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
784 const struct bcm_sf2_arl_entry *ent,
785 struct switchdev_obj_port_fdb *fdb,
786 int (*cb)(struct switchdev_obj *obj))
787 {
788 if (!ent->is_valid)
789 return 0;
790
791 if (port != ent->port)
792 return 0;
793
794 ether_addr_copy(fdb->addr, ent->mac);
795 fdb->vid = ent->vid;
796 fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
797
798 return cb(&fdb->obj);
799 }
800
/* Walk the hardware ARL via its search engine and report every entry
 * owned by @port through @cb. The engine yields results two at a time;
 * iteration stops when both slots come back invalid or the table-size
 * bound is reached. Returns 0 or the first error from wait/callback.
 */
static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
			       struct switchdev_obj_port_fdb *fdb,
			       int (*cb)(struct switchdev_obj *obj))
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct net_device *dev = ds->ports[port];
	struct bcm_sf2_arl_entry results[2];
	unsigned int count = 0;
	int ret;

	/* Start search operation */
	core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);

	do {
		ret = bcm_sf2_arl_search_wait(priv);
		if (ret)
			return ret;

		/* Read both entries, then return their values back */
		bcm_sf2_arl_search_rd(priv, 0, &results[0]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
		if (ret)
			return ret;

		bcm_sf2_arl_search_rd(priv, 1, &results[1]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
		if (ret)
			return ret;

		/* Two invalid slots mean the search has been exhausted */
		if (!results[0].is_valid && !results[1].is_valid)
			break;

	} while (count++ < CORE_ARLA_NUM_ENTRIES);

	return 0;
}
837
/* Bank-0 switch interrupt handler: latch the pending, unmasked sources
 * into irq0_stat and acknowledge them; no per-source handling here.
 */
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}
848
/* Bank-1 switch interrupt handler: acknowledge pending sources and
 * track port 7 (MoCA) link state from its link up/down interrupts.
 */
static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	/* Cache the software link state for port 7 */
	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}
864
865 static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
866 {
867 unsigned int timeout = 1000;
868 u32 reg;
869
870 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
871 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
872 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
873
874 do {
875 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
876 if (!(reg & SOFTWARE_RESET))
877 break;
878
879 usleep_range(1000, 2000);
880 } while (timeout-- > 0);
881
882 if (timeout == 0)
883 return -ETIMEDOUT;
884
885 return 0;
886 }
887
/* Mask every interrupt source on both INTRL2 banks and acknowledge any
 * latched status, leaving the controller quiescent.
 */
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
897
/* Scan the device-tree port children of @dn to discover which ports use
 * internal PHYs (recorded in int_phy_mask) and which one, if any, is
 * the MoCA port (moca_port, -1 when absent).
 */
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	const char *phy_mode_str;
	int mode;
	unsigned int port_num;
	int ret;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		/* Ports without a "reg" property cannot be identified */
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0) {
			ret = of_property_read_string(port, "phy-mode",
						      &phy_mode_str);
			if (ret < 0)
				continue;

			if (!strcasecmp(phy_mode_str, "internal"))
				priv->int_phy_mask |= 1 << port_num;
		}

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;
	}
}
933
/* One-time switch initialization: map register resources, reset the
 * core, hook up both interrupt banks, reset the MIB counters, configure
 * every port, set up the MDIO diversion mask and read out hardware
 * revisions. On failure, unwinds IRQs and mappings in reverse order.
 */
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct device_node *dn;
	void __iomem **base;
	unsigned int port;
	unsigned int i;
	u32 reg, rev;
	int ret;

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);

	/* All the interesting properties are at the parent device_node
	 * level
	 */
	dn = ds->pd->of_node->parent;
	bcm_sf2_identify_ports(priv, ds->pd->of_node);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	/* Map each named register resource into the consecutive __iomem
	 * pointers starting at priv->core (order matches reg_names[]).
	 */
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		*base = of_iomap(dn, i);
		if (*base == NULL) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			ret = -ENOMEM;
			goto out_unmap;
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		goto out_unmap;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
			  "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_unmap;
	}

	ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
			  "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_free_irq0;
	}

	/* Reset the MIB counters (pulse RST_MIB_CNT high then low) */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if ((1 << port) & ds->phys_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	/* Include the pseudo-PHY address and the broadcast PHY address to
	 * divert reads towards our workaround. This is only required for
	 * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
	 * that we can use the regular SWITCH_MDIO master controller instead.
	 *
	 * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
	 * to have a 1:1 mapping between Port address and PHY address in order
	 * to utilize the slave_mii_bus instance to read from Port PHYs. This is
	 * not what we want here, so we initialize phys_mii_mask 0 to always
	 * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
	else
		ds->phys_mii_mask = 0;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_free_irq0:
	free_irq(priv->irq0, priv);
out_unmap:
	/* Unmap whichever register resources were successfully mapped */
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		if (*base)
			iounmap(*base);
		base++;
	}
	return ret;
}
1061
1062 static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
1063 {
1064 return 0;
1065 }
1066
1067 static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
1068 {
1069 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1070
1071 /* The BCM7xxx PHY driver expects to find the integrated PHY revision
1072 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
1073 * the REG_PHY_REVISION register layout is.
1074 */
1075
1076 return priv->hw_params.gphy_rev;
1077 }
1078
/* Indirectly access a PHY register through the switch core's pseudo-PHY
 * window: temporarily take over the MDIO master, perform the read
 * (@op non-zero) or write (@op zero), then release the master.
 * Returns the 16-bit read value, or 0 for writes.
 */
static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr,
			       int regnum, u16 val)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	int ret = 0;
	u32 reg;

	/* Route MDIO transactions through the switch's internal master */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	/* NOTE(review): 0x70 << 2 appears to be the register selecting
	 * which PHY address the pseudo-PHY window targets — confirm
	 * against the SF2 register documentation.
	 */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	/* Hand the MDIO bus back to the external master */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
1110
1111 static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
1112 {
1113 /* Intercept reads from the MDIO broadcast address or Broadcom
1114 * pseudo-PHY address
1115 */
1116 switch (addr) {
1117 case 0:
1118 case BRCM_PSEUDO_PHY_ADDR:
1119 return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
1120 default:
1121 return 0xffff;
1122 }
1123 }
1124
1125 static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
1126 u16 val)
1127 {
1128 /* Intercept writes to the MDIO broadcast address or Broadcom
1129 * pseudo-PHY address
1130 */
1131 switch (addr) {
1132 case 0:
1133 case BRCM_PSEUDO_PHY_ADDR:
1134 bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
1135 break;
1136 }
1137
1138 return 0;
1139 }
1140
/* Propagate PHY link parameters to the port hardware: select the RGMII/
 * MII port mode and delay settings for external PHYs, program pause
 * support, and force the resolved speed/duplex/link into the port's
 * status-override register.
 */
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg;

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
		/* fallthrough - RGMII variants share the EXT_GPHY mode */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	/* Mirror the PHY's negotiated pause capabilities */
	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
}
1220
/* Fixed-PHY callback: report link/duplex/pause status for ports backed
 * by a fixed link, reading the switch's duplex and pause status
 * registers and forcing the port override register accordingly.
 */
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 duplex, pause;
	u32 reg;

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port]);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	/* Mirror the decided link state into the port override register
	 * so the hardware forwards (or stops forwarding) data.
	 */
	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	/* CORE_PAUSESTS holds RX pause in bit 'port' and TX pause in
	 * bit 'port + PAUSESTS_TX_PAUSE_SHIFT'; both set means
	 * asymmetric pause is also advertised.
	 */
	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}
1273
1274 static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
1275 {
1276 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1277 unsigned int port;
1278
1279 bcm_sf2_intr_disable(priv);
1280
1281 /* Disable all ports physically present including the IMP
1282 * port, the other ones have already been disabled during
1283 * bcm_sf2_sw_setup
1284 */
1285 for (port = 0; port < DSA_MAX_PORTS; port++) {
1286 if ((1 << port) & ds->phys_port_mask ||
1287 dsa_is_cpu_port(ds, port))
1288 bcm_sf2_port_disable(ds, port, NULL);
1289 }
1290
1291 return 0;
1292 }
1293
1294 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
1295 {
1296 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1297 unsigned int port;
1298 int ret;
1299
1300 ret = bcm_sf2_sw_rst(priv);
1301 if (ret) {
1302 pr_err("%s: failed to software reset switch\n", __func__);
1303 return ret;
1304 }
1305
1306 if (priv->hw_params.num_gphy == 1)
1307 bcm_sf2_gphy_enable_set(ds, true);
1308
1309 for (port = 0; port < DSA_MAX_PORTS; port++) {
1310 if ((1 << port) & ds->phys_port_mask)
1311 bcm_sf2_port_setup(ds, port, NULL);
1312 else if (dsa_is_cpu_port(ds, port))
1313 bcm_sf2_imp_setup(ds, port);
1314 }
1315
1316 return 0;
1317 }
1318
1319 static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
1320 struct ethtool_wolinfo *wol)
1321 {
1322 struct net_device *p = ds->dst[ds->index].master_netdev;
1323 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1324 struct ethtool_wolinfo pwol;
1325
1326 /* Get the parent device WoL settings */
1327 p->ethtool_ops->get_wol(p, &pwol);
1328
1329 /* Advertise the parent device supported settings */
1330 wol->supported = pwol.supported;
1331 memset(&wol->sopass, 0, sizeof(wol->sopass));
1332
1333 if (pwol.wolopts & WAKE_MAGICSECURE)
1334 memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
1335
1336 if (priv->wol_ports_mask & (1 << port))
1337 wol->wolopts = pwol.wolopts;
1338 else
1339 wol->wolopts = 0;
1340 }
1341
1342 static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
1343 struct ethtool_wolinfo *wol)
1344 {
1345 struct net_device *p = ds->dst[ds->index].master_netdev;
1346 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1347 s8 cpu_port = ds->dst[ds->index].cpu_port;
1348 struct ethtool_wolinfo pwol;
1349
1350 p->ethtool_ops->get_wol(p, &pwol);
1351 if (wol->wolopts & ~pwol.supported)
1352 return -EINVAL;
1353
1354 if (wol->wolopts)
1355 priv->wol_ports_mask |= (1 << port);
1356 else
1357 priv->wol_ports_mask &= ~(1 << port);
1358
1359 /* If we have at least one port enabled, make sure the CPU port
1360 * is also enabled. If the CPU port is the last one enabled, we disable
1361 * it since this configuration does not make sense.
1362 */
1363 if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
1364 priv->wol_ports_mask |= (1 << cpu_port);
1365 else
1366 priv->wol_ports_mask &= ~(1 << cpu_port);
1367
1368 return p->ethtool_ops->set_wol(p, wol);
1369 }
1370
/* DSA switch driver registration table: binds the Broadcom tag protocol
 * and the bcm_sf2 operations (setup, PHY access, ethtool, power
 * management, bridging and FDB handling) into the DSA core.
 */
static struct dsa_switch_driver bcm_sf2_switch_driver = {
	.tag_protocol		= DSA_TAG_PROTO_BRCM,
	.probe			= bcm_sf2_sw_probe,
	.setup			= bcm_sf2_sw_setup,
	.set_addr		= bcm_sf2_sw_set_addr,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.phy_read		= bcm_sf2_sw_phy_read,
	.phy_write		= bcm_sf2_sw_phy_write,
	.get_strings		= bcm_sf2_sw_get_strings,
	.get_ethtool_stats	= bcm_sf2_sw_get_ethtool_stats,
	.get_sset_count		= bcm_sf2_sw_get_sset_count,
	.adjust_link		= bcm_sf2_sw_adjust_link,
	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_eee		= bcm_sf2_sw_get_eee,
	.set_eee		= bcm_sf2_sw_set_eee,
	.port_bridge_join	= bcm_sf2_sw_br_join,
	.port_bridge_leave	= bcm_sf2_sw_br_leave,
	.port_stp_state_set	= bcm_sf2_sw_br_set_stp_state,
	.port_fdb_prepare	= bcm_sf2_sw_fdb_prepare,
	.port_fdb_add		= bcm_sf2_sw_fdb_add,
	.port_fdb_del		= bcm_sf2_sw_fdb_del,
	.port_fdb_dump		= bcm_sf2_sw_fdb_dump,
};
1400
/* Module entry point: register the switch driver with the DSA core */
static int __init bcm_sf2_init(void)
{
	register_switch_driver(&bcm_sf2_switch_driver);

	return 0;
}
module_init(bcm_sf2_init);
1408
/* Module exit point: unregister the switch driver from the DSA core */
static void __exit bcm_sf2_exit(void)
{
	unregister_switch_driver(&bcm_sf2_switch_driver);
}
module_exit(bcm_sf2_exit);
1414
1415 MODULE_AUTHOR("Broadcom Corporation");
1416 MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
1417 MODULE_LICENSE("GPL");
1418 MODULE_ALIAS("platform:brcm-sf2");
This page took 0.060699 seconds and 4 git commands to generate.