/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

#include "bcmsysport.h"
/* I/O accessor register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = __raw_readl(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,		\
				 u32 val, u32 off)			\
{									\
	__raw_writel(val, priv->base + offset + off);			\
}
BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
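
/* As an illustration of the macro above, BCM_SYSPORT_IO_MACRO(umac,
 * SYS_PORT_UMAC_OFFSET) expands to a pair of accessors equivalent to:
 *
 *   static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
 *   {
 *           return __raw_readl(priv->base + SYS_PORT_UMAC_OFFSET + off);
 *   }
 *   static inline void umac_writel(struct bcm_sysport_priv *priv,
 *                                  u32 val, u32 off)
 *   {
 *           __raw_writel(val, priv->base + SYS_PORT_UMAC_OFFSET + off);
 *   }
 *
 * so callers only ever deal with offsets relative to each register block.
 */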
/* L2-interrupt masking/unmasking helpers; they automatically save the applied
 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
	priv->irq##which##_mask &= ~(mask);				\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}
BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
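
/* BCM_SYSPORT_INTR_L2(0) therefore provides intrl2_0_mask_clear() and
 * intrl2_0_mask_set(), which unmask/mask interrupt bits while mirroring the
 * result into priv->irq0_mask, e.g.:
 *
 *   intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
 *
 * writes the hardware CPU_MASK_CLEAR register and clears the same bit in
 * priv->irq0_mask, so hot paths can consult the software copy instead of
 * re-reading CPU_MASK_STATUS.
 */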
/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bit support explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
}
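
/* For example, with a 64-bit DMA address such as 0x1_0000_2000,
 * upper_32_bits() yields 0x1 (masked with DESC_ADDR_HI_MASK and stored in
 * the HI/status/length word) and lower_32_bits() yields 0x0000_2000; on a
 * 32-bit phys_addr_t build only the single LO register write is emitted.
 */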
static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}
/* Ethtool operations */
static int bcm_sysport_set_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_sset(priv->phydev, cmd);
}
static int bcm_sysport_get_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_gset(priv->phydev, cmd);
}
static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g.: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}
static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= TSB_EN;
	else
		reg &= ~TSB_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}
static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
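
/* The STAT_* macros above (defined in bcmsysport.h) pair an ethtool string
 * with the type and offset of its backing field. A sketch of the idea (not
 * the exact definition):
 *
 *   #define STAT_MIB_RX(str, m) { str, BCM_SYSPORT_STAT_MIB_RX, \
 *           sizeof(((struct bcm_sysport_priv *)0)->m), \
 *           offsetof(struct bcm_sysport_priv, m) }
 *
 * which is what lets bcm_sysport_update_mib_counters() and
 * bcm_sysport_get_stats() walk the table generically.
 */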
static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
	info->n_stats = BCM_SYSPORT_STATS_LEN;
}
static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}
static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}
static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_SYSPORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_sysport_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	default:
		break;
	}
}
static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			/* Clear the counter once it has been read */
			rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}
static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcm_sysport_update_mib_counters(priv);

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u64 *)p;
	}
}
static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}
static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	dma_addr_t mapping;
	int ret;

	cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!cb->skb) {
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return -ENOMEM;
	}

	mapping = dma_map_single(kdev, cb->skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.rx_dma_failed++;
		bcm_sysport_free_cb(cb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
	priv->rx_bd_assign_ptr = priv->rx_bds +
		(priv->rx_bd_assign_index * DESC_SIZE);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	return 0;
}
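
/* Worked example of the refill bookkeeping above: num_rx_bds is a power of
 * two, so "index &= (num_rx_bds - 1)" wraps the assignment index without a
 * modulo. E.g. with 512 descriptors, index 511 + 1 masked with 511 goes back
 * to 0, and rx_bd_assign_ptr is rebased onto rx_bds accordingly.
 */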
static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	int ret = 0;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		if (cb->skb)
			continue;

		ret = bcm_sysport_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}
522 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv
*priv
,
525 struct device
*kdev
= &priv
->pdev
->dev
;
526 struct net_device
*ndev
= priv
->netdev
;
527 unsigned int processed
= 0, to_process
;
528 struct bcm_sysport_cb
*cb
;
530 unsigned int p_index
;
535 /* Determine how much we should process since last call */
536 p_index
= rdma_readl(priv
, RDMA_PROD_INDEX
);
537 p_index
&= RDMA_PROD_INDEX_MASK
;
539 if (p_index
< priv
->rx_c_index
)
540 to_process
= (RDMA_CONS_INDEX_MASK
+ 1) -
541 priv
->rx_c_index
+ p_index
;
543 to_process
= p_index
- priv
->rx_c_index
;
545 netif_dbg(priv
, rx_status
, ndev
,
546 "p_index=%d rx_c_index=%d to_process=%d\n",
547 p_index
, priv
->rx_c_index
, to_process
);
549 while ((processed
< to_process
) && (processed
< budget
)) {
550 cb
= &priv
->rx_cbs
[priv
->rx_read_ptr
];
556 if (priv
->rx_read_ptr
== priv
->num_rx_bds
)
557 priv
->rx_read_ptr
= 0;
559 /* We do not have a backing SKB, so we do not a corresponding
560 * DMA mapping for this incoming packet since
561 * bcm_sysport_rx_refill always either has both skb and mapping
564 if (unlikely(!skb
)) {
565 netif_err(priv
, rx_err
, ndev
, "out of memory!\n");
566 ndev
->stats
.rx_dropped
++;
567 ndev
->stats
.rx_errors
++;
571 dma_unmap_single(kdev
, dma_unmap_addr(cb
, dma_addr
),
572 RX_BUF_LENGTH
, DMA_FROM_DEVICE
);
574 /* Extract the Receive Status Block prepended */
575 rsb
= (struct bcm_rsb
*)skb
->data
;
576 len
= (rsb
->rx_status_len
>> DESC_LEN_SHIFT
) & DESC_LEN_MASK
;
577 status
= (rsb
->rx_status_len
>> DESC_STATUS_SHIFT
) &
580 netif_dbg(priv
, rx_status
, ndev
,
581 "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
582 p_index
, priv
->rx_c_index
, priv
->rx_read_ptr
,
585 if (unlikely(!(status
& DESC_EOP
) || !(status
& DESC_SOP
))) {
586 netif_err(priv
, rx_status
, ndev
, "fragmented packet!\n");
587 ndev
->stats
.rx_dropped
++;
588 ndev
->stats
.rx_errors
++;
589 bcm_sysport_free_cb(cb
);
593 if (unlikely(status
& (RX_STATUS_ERR
| RX_STATUS_OVFLOW
))) {
594 netif_err(priv
, rx_err
, ndev
, "error packet\n");
595 if (status
& RX_STATUS_OVFLOW
)
596 ndev
->stats
.rx_over_errors
++;
597 ndev
->stats
.rx_dropped
++;
598 ndev
->stats
.rx_errors
++;
599 bcm_sysport_free_cb(cb
);
605 /* Hardware validated our checksum */
606 if (likely(status
& DESC_L4_CSUM
))
607 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
609 /* Hardware pre-pends packets with 2bytes before Ethernet
610 * header plus we have the Receive Status Block, strip off all
611 * of this from the SKB.
613 skb_pull(skb
, sizeof(*rsb
) + 2);
614 len
-= (sizeof(*rsb
) + 2);
616 /* UniMAC may forward CRC */
618 skb_trim(skb
, len
- ETH_FCS_LEN
);
622 skb
->protocol
= eth_type_trans(skb
, ndev
);
623 ndev
->stats
.rx_packets
++;
624 ndev
->stats
.rx_bytes
+= len
;
626 napi_gro_receive(&priv
->napi
, skb
);
628 ret
= bcm_sysport_rx_refill(priv
, cb
);
630 priv
->mib
.alloc_rx_buff_failed
++;
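
/* Worked example of the to_process arithmetic above: the RDMA producer index
 * is a free-running hardware counter masked to RDMA_PROD_INDEX_MASK, so it
 * can be numerically smaller than the software consumer index after a wrap.
 * Assuming a 16-bit mask, rx_c_index == 65530 and p_index == 4 yields
 * to_process = (65535 + 1) - 65530 + 4 = 10 packets. (Mask width shown for
 * illustration; the real value comes from RDMA_PROD_INDEX_MASK.)
 */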
static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;

	if (cb->skb) {
		ndev->stats.tx_bytes += cb->skb->len;
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		ndev->stats.tx_packets++;
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}
/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	struct net_device *ndev = priv->netdev;
	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 hw_ind;

	txq = netdev_get_tx_queue(ndev, ring->index);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);

	last_c_index = ring->c_index;
	num_tx_cbs = ring->size;

	c_index &= (num_tx_cbs - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_cbs - last_c_index + c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
		  ring->index, c_index, last_tx_cn, last_c_index);

	while (last_tx_cn-- > 0) {
		cb = ring->cbs + last_c_index;
		bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		last_c_index++;
		last_c_index &= (num_tx_cbs - 1);
	}

	ring->c_index = c_index;

	if (netif_tx_queue_stopped(txq) && pkts_compl)
		netif_tx_wake_queue(txq);

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}
/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		intrl2_1_mask_clear(ring->priv, BIT(ring->index));
	}

	return 0;
}
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}
static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}
static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Stop monitoring MPD interrupt */
	intrl2_0_mask_set(priv, INTRL2_0_MPD);

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (priv->irq0_stat & INTRL2_0_MPD) {
		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
		bcm_sysport_resume_from_wol(priv);
	}

	return IRQ_HANDLED;
}
/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}
static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_kfree_skb(skb);
		skb = nskb;
	}

	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}
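
/* A worked example of the csum_info encoding above: for a TCP/IPv4 frame the
 * transport header starts 34 bytes into the (pre-TSB) packet (14-byte
 * Ethernet header + 20-byte IP header) and skb->csum_offset is 16, so the
 * low bits carry the checksum pointer 34 + 16 = 50 while the L4 pointer 34
 * is shifted in via L4_PTR_SHIFT, with L4_LENGTH_VALID set so the hardware
 * fills in the checksum. (Illustrative values; the exact bit layout comes
 * from the L4_* definitions in bcmsysport.h.)
 */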
static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS) otherwise they will be discarded when
	 * they enter the switch port logic. When Broadcom tags are enabled, we
	 * need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done after
	 * the Broadcom tag is stripped off the ingress packet.
	 */
	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
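
/* Note on the padding math above: ETH_ZLEN (60) + ENET_BRCM_TAG_LEN (4)
 * keeps runt frames out of the downstream switch. For example, a 42-byte ARP
 * frame is padded and its skb_len rounded up to 64 bytes before DMA mapping,
 * so the switch still sees a minimum-sized frame after it strips the
 * Broadcom tag.
 */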
static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	dev->trans_start = jiffies;
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (changed) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);

		phy_print_status(priv->phydev);
	}
}
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	unsigned int size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);

	return 0;
}
static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_reclaim(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= TDMA_EN;
	else
		reg &= ~TDMA_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	u32 reg;
	int ret;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = NUM_RX_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}
static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}
static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}
static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
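
/* For example, the MAC address 00:10:18:aa:bb:cc is programmed as
 * UMAC_MAC0 = 0x001018aa (first four bytes, big-endian packing) and
 * UMAC_MAC1 = 0x0000bbcc (last two bytes).
 */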
static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}
static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}
static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(priv->phydev);

	/* Enable TX interrupts for the 32 TXQs */
	intrl2_1_mask_clear(priv, 0xffffffff);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}
static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}
static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2-byte alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);

	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				      0, priv->phy_interface);
	if (!priv->phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request TX interrupt\n");
		goto out_free_irq0;
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(priv->phydev);

	return ret;
}
static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	/* mask all interrupts */
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}
static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(priv->phydev);

	return 0;
}
static struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_settings		= bcm_sysport_get_settings,
	.set_settings		= bcm_sysport_set_settings,
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
};
static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
};
#define REV_FMT	"v%2x.%02x"
static int bcm_sysport_probe(struct platform_device *pdev)
{
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		random_ether_addr(dev->dev_addr);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;
err:
	free_netdev(dev);
	return ret;
}
static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

	/* Password has already been programmed */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE)
		reg |= PSW_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	/* Enable the interrupt wake-up source */
	intrl2_0_mask_clear(priv, INTRL2_0_MPD);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}
static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}
static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Enable RXCHK if it was enabled before suspend */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(priv->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
		bcm_sysport_suspend, bcm_sysport_resume);
static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemport-v1.00" },
	{ .compatible = "brcm,systemport" },
	{ /* sentinel */ }
};
static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");