net: systemport: add suspend and resume support
[deliverable/linux.git] drivers/net/ethernet/broadcom/bcmsysport.c
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = __raw_readl(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,		\
				  u32 val, u32 off)			\
{									\
	__raw_writel(val, priv->base + offset + off);			\
}									\

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

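/* As a concrete illustration (the macro spelled out, not extra code), the
 * umac instantiation above expands to roughly:
 *
 *	static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
 *	{
 *		return __raw_readl(priv->base + SYS_PORT_UMAC_OFFSET + off);
 *	}
 *
 * so each register block gets accessors relative to its own window.
 */
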
/* L2-interrupt masking/unmasking helpers, which automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
 * hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
	priv->irq##which##_mask &= ~(mask);				\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}									\

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

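/* The two instantiations above generate intrl2_0_mask_clear()/_mask_set()
 * and intrl2_1_mask_clear()/_mask_set(), which the RX/misc and TX interrupt
 * paths below use to gate their respective INTRL2 controllers.
 */
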
/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static int bcm_sysport_set_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcm_sysport_get_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_csum_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_csum_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= TSB_EN;
	else
		reg &= ~TSB_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
			RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
	info->n_stats = BCM_SYSPORT_STATS_LEN;
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_SYSPORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_sysport_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
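		/* Hardware-backed counters are read at UMAC_MIB_START plus a
		 * running offset j (advanced by stat_sizeof per counter);
		 * non-RX MIB counters get an extra UMAC_MIB_STAT_OFFSET to
		 * reach their bank.
		 */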
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcm_sysport_update_mib_counters(priv);

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	dma_addr_t mapping;
	int ret;

	cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!cb->skb) {
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return -ENOMEM;
	}

	mapping = dma_map_single(kdev, cb->skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		bcm_sysport_free_cb(cb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
	priv->rx_bd_assign_ptr = priv->rx_bds +
				 (priv->rx_bd_assign_index * DESC_SIZE);
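	/* Note: the mask-based wrap above relies on num_rx_bds (NUM_RX_DESC)
	 * being a power of two.
	 */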

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	return 0;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	int ret = 0;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		if (cb->skb)
			continue;

		ret = bcm_sysport_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Determine how much we should process since last call */
	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		to_process = (RDMA_CONS_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		to_process = p_index - priv->rx_c_index;
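	/* Worked example, assuming a 16-bit index mask: p_index = 2 with
	 * rx_c_index = 0xfffe takes the wrap branch and yields
	 * to_process = 0x10000 - 0xfffe + 2 = 4 descriptors.
	 */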

	netif_dbg(priv, rx_status, ndev,
			"p_index=%d rx_c_index=%d to_process=%d\n",
			p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) &&
		(processed < budget)) {

		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = cb->skb;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			DESC_STATUS_MASK;

		processed++;
		priv->rx_read_ptr++;
		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;

		netif_dbg(priv, rx_status, ndev,
				"p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
				p_index, priv->rx_c_index, priv->rx_read_ptr,
				len, status);

		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto refill;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			bcm_sysport_free_cb(cb);
			goto refill;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			bcm_sysport_free_cb(cb);
			goto refill;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header, plus we have the Receive Status Block; strip all of
		 * this off the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}
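		/* Length accounting example: a minimal 64-byte frame
		 * (including FCS) is DMA'd as sizeof(*rsb) + 2 + 64 bytes;
		 * the pull above leaves 64 bytes, and with crc_fwd set the
		 * 4-byte FCS is trimmed, handing 60 bytes to the stack.
		 */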

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;

		napi_gro_receive(&priv->napi, skb);
refill:
		bcm_sysport_rx_refill(priv, cb);
	}

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;

	if (cb->skb) {
		ndev->stats.tx_bytes += cb->skb->len;
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		ndev->stats.tx_packets++;
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	struct net_device *ndev = priv->netdev;
	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 hw_ind;

	txq = netdev_get_tx_queue(ndev, ring->index);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);

	last_c_index = ring->c_index;
	num_tx_cbs = ring->size;

	c_index &= (num_tx_cbs - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_cbs - last_c_index + c_index;
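	/* Example: with the 256-entry rings created at init time, c_index = 2
	 * and last_c_index = 250 means 256 - 250 + 2 = 8 descriptors to
	 * reclaim.
	 */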

	netif_dbg(priv, tx_done, ndev,
			"ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
			ring->index, c_index, last_tx_cn, last_c_index);

	while (last_tx_cn-- > 0) {
		cb = ring->cbs + last_c_index;
		bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		last_c_index++;
		last_c_index &= (num_tx_cbs - 1);
	}

	ring->c_index = c_index;

	if (netif_tx_queue_stopped(txq) && pkts_compl)
		netif_tx_wake_queue(txq);

	netif_dbg(priv, tx_done, ndev,
			"ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
			ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		intrl2_1_mask_clear(ring->priv, BIT(ring->index));
	}

	return work_done;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		dev_kfree_skb(skb);
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return -ENOMEM;
		}
		skb = nskb;
	}

	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = ntohs(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return 0;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);
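		/* Worked example: for TCP in an IPv4 frame with no IP
		 * options, the checksum start is 14 (Ethernet) + 20 (IP) = 34
		 * bytes into the original frame (the TSB pushed above cancels
		 * out of the subtraction), and skb->csum_offset is 16, so the
		 * packed checksum field pointer is 34 + 16 = 50.
		 */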

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else
			csum_info = 0;

		tsb->l4_ptr_dest_map = csum_info;
	}

	return 0;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		ret = bcm_sysport_insert_tsb(skb, dev);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS) otherwise they will be discarded when
	 * they enter the switch port logic. When Broadcom tags are enabled, we
	 * need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done after
	 * the Broadcom tag is stripped off the ingress packet.
	 */
	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
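	/* skb_padto() zero-pads the buffer but leaves skb->len unchanged,
	 * which is why the padded length is recomputed above and used for
	 * the DMA mapping and the descriptor below.
	 */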

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
				skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
			DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
			ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	dev->trans_start = jiffies;
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (changed) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);

		phy_print_status(priv->phydev);
	}
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
			"TDMA cfg, size=%d, desc_cpu=%p\n",
			ring->size, ring->desc_cpu);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_reclaim(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= TDMA_EN;
	else
		reg &= ~TDMA_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	u32 reg;
	int ret;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = NUM_RX_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kzalloc(priv->num_rx_bds *
				sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
			"RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
			priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					dma_unmap_addr(cb, dma_addr),
					RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline int umac_reset(struct bcm_sysport_priv *priv)
{
	unsigned int timeout = 0;
	u32 reg;
	int ret = 0;

	umac_writel(priv, 0, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			break;

		udelay(1);
	}

	if (timeout == 1000) {
		dev_err(&priv->pdev->dev,
			"timeout waiting for MAC to come out of reset\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
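	/* e.g. 00:10:18:aa:bb:cc is packed as UMAC_MAC0 = 0x001018aa and
	 * UMAC_MAC1 = 0x0000bbcc
	 */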
	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	napi_enable(&priv->napi);

	phy_start(priv->phydev);

	/* Enable TX interrupts for the 32 TXQs */
	intrl2_1_mask_clear(priv, 0xffffffff);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	ret = umac_reset(priv);
	if (ret) {
		netdev_err(dev, "UniMAC reset failed\n");
		return ret;
	}

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2-byte alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);

	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
					0, priv->phy_interface);
	if (!priv->phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset housekeeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request TX interrupt\n");
		goto out_free_irq0;
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
					i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(priv->phydev);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	/* mask all interrupts */
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(priv->phydev);

	return 0;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_settings		= bcm_sysport_get_settings,
	.set_settings		= bcm_sysport_set_settings,
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
};

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
};

#define REV_FMT	"v%2x.%02x"

static int bcm_sysport_probe(struct platform_device *pdev)
{
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

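	/* A minimal sketch of a matching DT node (register and interrupt
	 * values here are illustrative only, not taken from a real board):
	 *
	 *	ethernet@f04a0000 {
	 *		compatible = "brcm,systemport-v1.00";
	 *		reg = <0xf04a0000 0x4650>;
	 *		interrupts = <0 18 0>, <0 19 0>;
	 *		systemport,num-txq = <32>;
	 *		systemport,num-rxq = <1>;
	 *	};
	 */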
	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		random_ether_addr(dev->dev_addr);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* We are interfaced to a switch which handles the multicast
	 * filtering for us, so we do not support programming any
	 * multicast hash table in this Ethernet MAC.
	 */
	dev->flags &= ~IFF_MULTICAST;

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		"Broadcom SYSTEMPORT" REV_FMT
		" at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		(priv->rev >> 8) & 0xff, priv->rev & 0xff,
		priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;
err:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
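/* Suspend mirrors bcm_sysport_stop(): software is quiesced first, then the
 * receive path (UniMAC RX, RDMA) with an RX flush, then, after letting
 * in-flight packets drain, the transmit path (TDMA, UniMAC TX) with a TX
 * flush, before the SW ring state is torn down. Resume rebuilds the rings
 * and re-enables the blocks in the opposite order.
 */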
static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_csum_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	return 0;
}

static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
					i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Enable RXCHK */
	if (priv->rx_csum_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(priv->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
		bcm_sysport_suspend, bcm_sysport_resume);

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemport-v1.00" },
	{ .compatible = "brcm,systemport" },
	{ /* sentinel */ }
};

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver =  {
		.name = "brcm-systemport",
		.owner = THIS_MODULE,
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");