/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
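
/* Worked example, using the sizes set up later in this file: with
 * TOTAL_DESC = 256 descriptors, 4 Tx queues and 32 BDs per queue, the
 * default queue 16 is left with 256 - 4 * 32 = 128 descriptors.
 */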
#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
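
/* Sketch of the resulting layout (exact sizes depend on words_per_bd,
 * e.g. 3 words per descriptor on a 40-bit capable GENET): the 256
 * descriptors occupy 256 * 3 * 4 = 3072 bytes starting at
 * tdma_offset/rdma_offset, and the TDMA/RDMA control registers follow
 * immediately after that block.
 */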
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv, void __iomem *d,
			       dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}
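
/* Setting one descriptor therefore costs two GISB register writes
 * (three when 40-bit addressing is compiled in), once per packet on the
 * hot path, which is why dmadesc_set_addr() only touches the HI word
 * when the platform actually needs it.
 */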
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}
/* These macros are defined to deal with register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI,
};

/* GENET v4 supports 40-bits pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}
static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}
static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc
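
/* Worked example of how this gap is applied in
 * bcmgenet_update_mib_counters() below: an RX counter at running
 * offset j is read from UMAC_MIB_START + j, while a TX or RUNT counter
 * at the same running offset is read from UMAC_MIB_START + j + 0xC.
 */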
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
	info->n_stats = BCMGENET_STATS_LEN;
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_MIB_RX:
		case BCMGENET_STAT_MIB_TX:
		case BCMGENET_STAT_RUNT:
			if (s->type != BCMGENET_STAT_MIB_RX)
				offset = BCMGENET_STAT_OFFSET;
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27Mhz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}

static int bcmgenet_nway_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return genphy_restart_aneg(priv->phydev);
}
/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= bcmgenet_nway_reset,
};
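
/* These ops back the generic ethtool ioctls: for instance,
 * `ethtool -S ethN` walks bcmgenet_gstrings_stats through get_strings()
 * and get_ethtool_stats(), and plain `ethtool ethN` goes through
 * get_settings().
 */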
/* Power down the unimac, based on mode. */
static void bcmgenet_power_down(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	default:
		break;
	}
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
			 EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

	if (mode == GENET_POWER_PASSIVE)
		bcmgenet_mii_reset(priv->dev);
}
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
						  struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
						 struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
					       struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
				 INTRL2_CPU_MASK_CLEAR);
	priv->int1_mask &= ~(1 << ring->index);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
						struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
				 INTRL2_CPU_MASK_SET);
	priv->int1_mask |= (1 << ring->index);
}
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int pkts_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;

	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
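
	/* Worked example of the wrap-around branch: with the 16-bit
	 * DMA_C_INDEX_MASK (0xFFFF), an old ring->c_index of 0xFFF0 and a
	 * hardware c_index of 0x0010 give
	 * 0x10000 - 0xFFF0 + 0x0010 = 0x20 completed descriptors.
	 */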
	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			pkts_compl++;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 tx_cb_ptr->skb->len,
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dev->stats.tx_bytes +=
				dma_unmap_len(tx_cb_ptr, dma_len);
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
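
	/* A single SKB can consume up to 1 + MAX_SKB_FRAGS descriptors
	 * (linear part plus every page fragment), which is the headroom
	 * required before the queue is woken back up below.
	 */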
	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(dev, ring->queue);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	return pkts_compl;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring->priv, ring);

		return 0;
	}

	return budget;
}

static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Transmits a single SKB (either head of a fragment or a single SKB)
 * caller must hold priv->lock
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}
/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet xmited through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragment */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* we kept a software copy of how much we should advance the TDMA
	 * producer index, now write it down to the hardware
	 */
	bcmgenet_tdma_ring_writel(priv, ring->index,
				  ring->prod_index, TDMA_PROD_INDEX);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
				     unsigned int index,
				     unsigned int budget)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int discards;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, index, RDMA_PROD_INDEX);

	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		dev->stats.rx_missed_errors += discards;
		dev->stats.rx_errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, index, 0,
						  RDMA_PROD_INDEX);
		}
	}

	p_index &= DMA_P_INDEX_MASK;

	if (likely(p_index >= ring->c_index))
		rxpkttoprocess = p_index - ring->c_index;
	else
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
				 p_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[ring->read_ptr];
		skb = bcmgenet_rx_refill(priv, cb);

		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			goto next;
		}

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv, cb->bd_addr);
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
					 DMA_RX_OV |
					 DMA_RX_NO |
					 DMA_RX_LG |
					 DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			    priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove hardware 2bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/*Finish setting up the received SKB and send it to the kernel*/
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&priv->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

next:
		rxpktprocessed++;
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
		else
			ring->read_ptr = ring->cb_ptr;

		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, index, ring->c_index,
					  RDMA_CONS_INDEX);
	}

	return rxpktprocessed;
}
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
				     struct bcmgenet_rx_ring *ring)
{
	struct enet_cb *cb;
	struct sk_buff *skb;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		skb = bcmgenet_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb_any(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(&priv->dev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}
static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	if (timeout == 1000) {
		dev_err(kdev,
			"timeout waiting for MAC to come out of reset\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts.*/
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg, cpu_mask_clear;
	int index;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;

	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);

	/* Monitor cable plug/unplugged event for internal PHY */
	if (phy_is_internal(priv->phydev)) {
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	} else if (priv->ext_phy) {
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;

	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);

	for (index = 0; index < priv->hw_params->tx_queues; index++)
		bcmgenet_intrl2_1_writel(priv, (1 << index),
					 INTRL2_CPU_MASK_CLEAR);

	/* Enable rx/tx engine.*/
	dev_dbg(kdev, "done init umac\n");

	return 0;
}
/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
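
	/* The resulting ring-to-queue mapping mirrors the strategy
	 * described in bcmgenet_xmit(): priority rings 0..3 service
	 * netdev queues 1..4, while the default ring 16 services queue 0.
	 */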
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	napi_enable(&ring->napi);
}
static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);
}
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size,
				 unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	ring->index = index;
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;

	ret = bcmgenet_alloc_rx_buffers(priv, ring);
	if (ret)
		return ret;

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);

	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	return ret;
}
/* Initialize Tx queues
 *
 * Queues 0-3 are priority-based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * Queue 16 is the default Tx queue with
 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
 *
 * The transmit control block pool is then partitioned as follows:
 * - Tx queue 0 uses tx_cbs[0..31]
 * - Tx queue 1 uses tx_cbs[32..63]
 * - Tx queue 2 uses tx_cbs[64..95]
 * - Tx queue 3 uses tx_cbs[96..127]
 * - Tx queue 16 uses tx_cbs[128..255]
 */
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 dma_ctrl, ring_cfg;
	u32 dma_priority[3] = {0, 0, 0};
	u32 dma_enable;

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	/* Initialize Tx priority queues */
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Initialize Tx default queue 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));

	/* Set Tx queue priorities */
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Enable Tx queues */
	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Enable Tx DMA */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
}
/* Initialize Rx queues
 *
 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
 * used to direct traffic to these queues.
 *
 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
 */
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i;
	u32 dma_enable;
	u32 dma_ctrl, ring_cfg;
	int ret;

	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Initialize Rx priority queues */
	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ret = bcmgenet_init_rx_ring(priv, i,
					    priv->hw_params->rx_bds_per_q,
					    i * priv->hw_params->rx_bds_per_q,
					    (i + 1) *
					    priv->hw_params->rx_bds_per_q);
		if (ret)
			return ret;

		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	}

	/* Initialize Rx default queue 16 */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
				    priv->hw_params->rx_queues *
				    priv->hw_params->rx_bds_per_q,
				    TOTAL_DESC);
	if (ret)
		return ret;

	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));

	/* Enable rings */
	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	return 0;
}
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;

	/* Disable TDMA to stop add more frames in TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}
static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	/* disable DMA */
	bcmgenet_dma_teardown(priv);

	for (i = 0; i < priv->num_tx_bds; i++) {
		if (priv->tx_cbs[i].skb != NULL) {
			dev_kfree_skb(priv->tx_cbs[i].skb);
			priv->tx_cbs[i].skb = NULL;
		}
	}

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}

static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	bcmgenet_fini_tx_ring(priv, DESC_INDEX);

	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_fini_tx_ring(priv, i);

	__bcmgenet_fini_dma(priv);
}
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;
	unsigned int i;
	struct enet_cb *cb;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* Init rDma */
	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize common Rx ring structures */
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize Rx queues */
	ret = bcmgenet_init_rx_queues(priv->dev);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize Rx queues\n");
		bcmgenet_free_rx_buffers(priv);
		kfree(priv->rx_cbs);
		return ret;
	}

	/* Init tDma */
	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		__bcmgenet_fini_dma(priv);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize Tx queues */
	bcmgenet_init_tx_queues(priv->dev);

	return 0;
}
/* NAPI polling method*/
static int bcmgenet_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_priv *priv = container_of(napi,
			struct bcmgenet_priv, napi);
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(priv, DESC_INDEX, budget);

	if (work_done < budget) {
		napi_complete(napi);
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
					 INTRL2_CPU_MASK_CLEAR);
	}

	return work_done;
}
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
		netif_dbg(priv, wol, priv->dev,
			  "magic packet detected, waking up\n");
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
	}

	/* Link UP/DOWN event */
	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
		phy_mac_interrupt(priv->phydev,
				  priv->irq0_stat & UMAC_IRQ_LINK_UP);
		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
	}
}
/* bcmgenet_isr1: interrupt handler for ring buffer. */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_tx_ring *ring;
	unsigned int index;

	/* Save irq status for bottom-half processing. */
	priv->irq1_stat =
		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);

	/* Check the MBDONE interrupts.
	 * packet is done, reclaim descriptors
	 */
	for (index = 0; index < priv->hw_params->tx_queues; index++) {
		if (!(priv->irq1_stat & BIT(index)))
			continue;

		ring = &priv->tx_rings[index];

		if (likely(napi_schedule_prep(&ring->napi))) {
			ring->int_disable(priv, ring);
			__napi_schedule(&ring->napi);
		}
	}

	return IRQ_HANDLED;
}
/* bcmgenet_isr0: Handle various interrupts. */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	/* Save irq status for bottom-half processing. */
	priv->irq0_stat =
		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", priv->irq0_stat);

	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
		/* We use NAPI(software interrupt throttling, if
		 * Rx Descriptor throttling is not used.
		 * Disable interrupt, will be enabled in the poll method.
		 */
		if (likely(napi_schedule_prep(&priv->napi))) {
			bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
						 INTRL2_CPU_MASK_SET);
			__napi_schedule(&priv->napi);
		}
	}
	if (priv->irq0_stat &
	    (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
		struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&ring->napi))) {
			ring->int_disable(priv, ring);
			__napi_schedule(&ring->napi);
		}
	}
	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
			       UMAC_IRQ_PHY_DET_F |
			       UMAC_IRQ_LINK_UP |
			       UMAC_IRQ_LINK_DOWN |
			       UMAC_IRQ_HFB_SM |
			       UMAC_IRQ_HFB_MM |
			       UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
		wake_up(&priv->wq);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}

static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			     (addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

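/* Worked example (illustrative): for dev_addr 00:10:18:aa:bb:cc the
 * writes above produce UMAC_MAC0 = 0x001018aa (bytes 0-3) and
 * UMAC_MAC1 = 0x0000bbcc (bytes 4-5).
 */
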
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}

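/* Usage sketch (inferred from bcmgenet_open()/bcmgenet_resume()): the
 * returned mask is handed back to bcmgenet_enable_dma() once the rings
 * are reprogrammed, re-setting the ring 16 buffer-enable bit and the
 * global DMA_EN bit that were cleared here.
 */
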
static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}

static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	napi_enable(&priv->napi);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	netif_tx_start_all_queues(dev);

	phy_start(priv->phydev);
}

static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto err_clk_disable;

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	/* Re-configure the port multiplexer towards the PHY device */
	bcmgenet_mii_config(priv->dev, false);

	phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
			   priv->phy_interface);

	bcmgenet_netif_start(dev);

	return 0;

err_irq0:
	free_irq(priv->irq0, priv);
err_fini_dma:
	bcmgenet_fini_dma(priv);
err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
	return ret;
}

static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	bcmgenet_intr_disable(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;
}

static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(priv->phydev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return 0;
}

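/* Teardown ordering used above and mirrored in bcmgenet_suspend():
 * Rx is disabled first so no new frames reach the RDMA, the DMA is
 * torn down, Tx is disabled only once TX DMA is quiesced, and the
 * remaining tx skbs are reclaimed before the rings are freed.
 */
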
static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	dev->trans_start = jiffies;

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

#define MAX_MC_COUNT	16

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i,
					 int *mc)
{
	u32 reg;

	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
	reg |= (1 << (MAX_MC_COUNT - *mc));
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
	*i += 2;
	(*mc)++;
}

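/* Worked example (illustrative): the first call, with *i == 0 and
 * *mc == 0, writes addr[0-1] to UMAC_MDF_ADDR + 0x0 and addr[2-5] to
 * UMAC_MDF_ADDR + 0x4, then sets enable bit 16 (MAX_MC_COUNT - 0);
 * the second call uses offsets 0x8/0xc and enable bit 15.
 */
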
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMac doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* my own address */
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast list */
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}

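/* Capacity arithmetic (illustrative): dev->broadcast and dev->dev_addr
 * always consume the first two of the MAX_MC_COUNT (16) MDF slots, so
 * at most 14 additional unicast/multicast addresses can be programmed;
 * larger lists make the function return early without updating the
 * filter.
 */
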
/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= bcmgenet_ioctl,
	.ndo_set_features	= bcmgenet_set_features,
};

/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.tx_bds_per_q = 0,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
};

/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;
	u16 gphy_rev;

	if (GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V3;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V2;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V1;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, account for that.
	 *
	 * On newer chips, starting with PHY revision G0, a new scheme is
	 * deployed similar to the Starfighter 2 switch with GPHY major
	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
	 * is reserved as well as special value 0x01ff, we have a small
	 * heuristic to check for the new GPHY revision and re-arrange things
	 * so the GPHY driver is happy.
	 */
	gphy_rev = reg & 0xffff;

	/* This is the good old scheme, just GPHY major, no minor nor patch */
	if ((gphy_rev & 0xf0) != 0)
		priv->gphy_rev = gphy_rev << 8;

	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
	else if ((gphy_rev & 0xff00) != 0)
		priv->gphy_rev = gphy_rev;

	/* This is reserved so should require special treatment */
	else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
		return;
	}

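	/* Worked examples (illustrative): an old-scheme value of 0x0060 is
	 * shifted up to gphy_rev 0x6000 so the major lands in bits 15:8,
	 * while a new-scheme value such as 0x1001 (major G0 = 0x10, patch
	 * level 1) already has that layout and is stored unchanged.
	 */
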
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAQ msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->tx_bds_per_q,
		 params->rx_queues, params->rx_bds_per_q,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}

static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
	{ },
};

static int bcmgenet_probe(struct platform_device *pdev)
{
	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	int err = -EIO;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
				 GENET_MAX_MQ_CNT + 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	if (dn) {
		of_id = of_match_node(bcmgenet_match, dn);
		if (!of_id) {
			err = -EINVAL;
			goto err;
		}
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (!priv->irq0 || !priv->irq1) {
		dev_err(&pdev->dev, "can't find IRQs\n");
		err = -EINVAL;
		goto err;
	}

	if (dn) {
		macaddr = of_get_mac_address(dn);
		if (!macaddr) {
			dev_err(&pdev->dev, "can't find MAC address\n");
			err = -EINVAL;
			goto err;
		}
	} else {
		macaddr = pd->mac_address;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	ether_addr_copy(dev->dev_addr, macaddr);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set hardware features */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
			       dev->name, priv);
	if (!err)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);

	priv->dev = dev;
	priv->pdev = pdev;
	if (dn)
		priv->version = (enum bcmgenet_version)of_id->data;
	else
		priv->version = pd->genet_version;

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk))
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");

	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	bcmgenet_set_hw_params(priv);

	/* Mii wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol))
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");

	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
		priv->clk_eee = NULL;
	}

	err = reset_umac(priv);
	if (err)
		goto err_clk_disable;

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
	 * just the ring 16 descriptor based TX queue)
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err)
		goto err;

	return err;

err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}

static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	if (!netif_running(dev))
		return 0;

	bcmgenet_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	return 0;
}

static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto out_clk_disable;

	/* From WOL-enabled suspend, switch to regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(priv->phydev);
	/* Speed settings must be restored */
	bcmgenet_mii_config(priv->dev, false);

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	netif_device_attach(dev);

	phy_resume(priv->phydev);

	if (priv->eee.eee_enabled)
		bcmgenet_eee_enable_set(dev, true);

	bcmgenet_netif_start(dev);

	return 0;

out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);

static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.driver	= {
		.name	= "bcmgenet",
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");