2 * Cadence MACB/GEM Ethernet Controller driver
4 * Copyright (C) 2004-2006 Atmel Corporation
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/clk.h>
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/circ_buf.h>
18 #include <linux/slab.h>
19 #include <linux/init.h>
21 #include <linux/gpio.h>
22 #include <linux/gpio/consumer.h>
23 #include <linux/interrupt.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/platform_data/macb.h>
28 #include <linux/platform_device.h>
29 #include <linux/phy.h>
31 #include <linux/of_device.h>
32 #include <linux/of_gpio.h>
33 #include <linux/of_mdio.h>
34 #include <linux/of_net.h>
38 #define MACB_RX_BUFFER_SIZE 128
39 #define RX_BUFFER_MULTIPLE 64 /* bytes */
40 #define RX_RING_SIZE 512 /* must be power of 2 */
41 #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
43 #define TX_RING_SIZE 128 /* must be power of 2 */
44 #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
46 /* level of occupied TX descriptors under which we wake up TX process */
47 #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4)
49 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
51 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
54 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
56 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
57 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
59 #define GEM_MTU_MIN_SIZE 68
61 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
62 #define MACB_WOL_ENABLED (0x1 << 1)
64 /* Graceful stop timeouts in us. We should allow up to
65 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
67 #define MACB_HALT_TIMEOUT 1230
69 /* Ring buffer accessors */
70 static unsigned int macb_tx_ring_wrap(unsigned int index
)
72 return index
& (TX_RING_SIZE
- 1);
75 static struct macb_dma_desc
*macb_tx_desc(struct macb_queue
*queue
,
78 return &queue
->tx_ring
[macb_tx_ring_wrap(index
)];
81 static struct macb_tx_skb
*macb_tx_skb(struct macb_queue
*queue
,
84 return &queue
->tx_skb
[macb_tx_ring_wrap(index
)];
87 static dma_addr_t
macb_tx_dma(struct macb_queue
*queue
, unsigned int index
)
91 offset
= macb_tx_ring_wrap(index
) * sizeof(struct macb_dma_desc
);
93 return queue
->tx_ring_dma
+ offset
;
96 static unsigned int macb_rx_ring_wrap(unsigned int index
)
98 return index
& (RX_RING_SIZE
- 1);
101 static struct macb_dma_desc
*macb_rx_desc(struct macb
*bp
, unsigned int index
)
103 return &bp
->rx_ring
[macb_rx_ring_wrap(index
)];
106 static void *macb_rx_buffer(struct macb
*bp
, unsigned int index
)
108 return bp
->rx_buffers
+ bp
->rx_buffer_size
* macb_rx_ring_wrap(index
);
112 static u32
hw_readl_native(struct macb
*bp
, int offset
)
114 return __raw_readl(bp
->regs
+ offset
);
117 static void hw_writel_native(struct macb
*bp
, int offset
, u32 value
)
119 __raw_writel(value
, bp
->regs
+ offset
);
122 static u32
hw_readl(struct macb
*bp
, int offset
)
124 return readl_relaxed(bp
->regs
+ offset
);
127 static void hw_writel(struct macb
*bp
, int offset
, u32 value
)
129 writel_relaxed(value
, bp
->regs
+ offset
);
132 /* Find the CPU endianness by using the loopback bit of NCR register. When the
133 * CPU is in big endian we need to program swapped mode for management
136 static bool hw_is_native_io(void __iomem
*addr
)
138 u32 value
= MACB_BIT(LLB
);
140 __raw_writel(value
, addr
+ MACB_NCR
);
141 value
= __raw_readl(addr
+ MACB_NCR
);
143 /* Write 0 back to disable everything */
144 __raw_writel(0, addr
+ MACB_NCR
);
146 return value
== MACB_BIT(LLB
);
149 static bool hw_is_gem(void __iomem
*addr
, bool native_io
)
154 id
= __raw_readl(addr
+ MACB_MID
);
156 id
= readl_relaxed(addr
+ MACB_MID
);
158 return MACB_BFEXT(IDNUM
, id
) >= 0x2;
161 static void macb_set_hwaddr(struct macb
*bp
)
166 bottom
= cpu_to_le32(*((u32
*)bp
->dev
->dev_addr
));
167 macb_or_gem_writel(bp
, SA1B
, bottom
);
168 top
= cpu_to_le16(*((u16
*)(bp
->dev
->dev_addr
+ 4)));
169 macb_or_gem_writel(bp
, SA1T
, top
);
171 /* Clear unused address register sets */
172 macb_or_gem_writel(bp
, SA2B
, 0);
173 macb_or_gem_writel(bp
, SA2T
, 0);
174 macb_or_gem_writel(bp
, SA3B
, 0);
175 macb_or_gem_writel(bp
, SA3T
, 0);
176 macb_or_gem_writel(bp
, SA4B
, 0);
177 macb_or_gem_writel(bp
, SA4T
, 0);
180 static void macb_get_hwaddr(struct macb
*bp
)
182 struct macb_platform_data
*pdata
;
188 pdata
= dev_get_platdata(&bp
->pdev
->dev
);
190 /* Check all 4 address register for valid address */
191 for (i
= 0; i
< 4; i
++) {
192 bottom
= macb_or_gem_readl(bp
, SA1B
+ i
* 8);
193 top
= macb_or_gem_readl(bp
, SA1T
+ i
* 8);
195 if (pdata
&& pdata
->rev_eth_addr
) {
196 addr
[5] = bottom
& 0xff;
197 addr
[4] = (bottom
>> 8) & 0xff;
198 addr
[3] = (bottom
>> 16) & 0xff;
199 addr
[2] = (bottom
>> 24) & 0xff;
200 addr
[1] = top
& 0xff;
201 addr
[0] = (top
& 0xff00) >> 8;
203 addr
[0] = bottom
& 0xff;
204 addr
[1] = (bottom
>> 8) & 0xff;
205 addr
[2] = (bottom
>> 16) & 0xff;
206 addr
[3] = (bottom
>> 24) & 0xff;
207 addr
[4] = top
& 0xff;
208 addr
[5] = (top
>> 8) & 0xff;
211 if (is_valid_ether_addr(addr
)) {
212 memcpy(bp
->dev
->dev_addr
, addr
, sizeof(addr
));
217 dev_info(&bp
->pdev
->dev
, "invalid hw address, using random\n");
218 eth_hw_addr_random(bp
->dev
);
221 static int macb_mdio_read(struct mii_bus
*bus
, int mii_id
, int regnum
)
223 struct macb
*bp
= bus
->priv
;
226 macb_writel(bp
, MAN
, (MACB_BF(SOF
, MACB_MAN_SOF
)
227 | MACB_BF(RW
, MACB_MAN_READ
)
228 | MACB_BF(PHYA
, mii_id
)
229 | MACB_BF(REGA
, regnum
)
230 | MACB_BF(CODE
, MACB_MAN_CODE
)));
232 /* wait for end of transfer */
233 while (!MACB_BFEXT(IDLE
, macb_readl(bp
, NSR
)))
236 value
= MACB_BFEXT(DATA
, macb_readl(bp
, MAN
));
241 static int macb_mdio_write(struct mii_bus
*bus
, int mii_id
, int regnum
,
244 struct macb
*bp
= bus
->priv
;
246 macb_writel(bp
, MAN
, (MACB_BF(SOF
, MACB_MAN_SOF
)
247 | MACB_BF(RW
, MACB_MAN_WRITE
)
248 | MACB_BF(PHYA
, mii_id
)
249 | MACB_BF(REGA
, regnum
)
250 | MACB_BF(CODE
, MACB_MAN_CODE
)
251 | MACB_BF(DATA
, value
)));
253 /* wait for end of transfer */
254 while (!MACB_BFEXT(IDLE
, macb_readl(bp
, NSR
)))
261 * macb_set_tx_clk() - Set a clock to a new frequency
262 * @clk Pointer to the clock to change
263 * @rate New frequency in Hz
264 * @dev Pointer to the struct net_device
266 static void macb_set_tx_clk(struct clk
*clk
, int speed
, struct net_device
*dev
)
268 long ferr
, rate
, rate_rounded
;
287 rate_rounded
= clk_round_rate(clk
, rate
);
288 if (rate_rounded
< 0)
291 /* RGMII allows 50 ppm frequency error. Test and warn if this limit
294 ferr
= abs(rate_rounded
- rate
);
295 ferr
= DIV_ROUND_UP(ferr
, rate
/ 100000);
297 netdev_warn(dev
, "unable to generate target frequency: %ld Hz\n",
300 if (clk_set_rate(clk
, rate_rounded
))
301 netdev_err(dev
, "adjusting tx_clk failed.\n");
304 static void macb_handle_link_change(struct net_device
*dev
)
306 struct macb
*bp
= netdev_priv(dev
);
307 struct phy_device
*phydev
= bp
->phy_dev
;
309 int status_change
= 0;
311 spin_lock_irqsave(&bp
->lock
, flags
);
314 if ((bp
->speed
!= phydev
->speed
) ||
315 (bp
->duplex
!= phydev
->duplex
)) {
318 reg
= macb_readl(bp
, NCFGR
);
319 reg
&= ~(MACB_BIT(SPD
) | MACB_BIT(FD
));
321 reg
&= ~GEM_BIT(GBE
);
325 if (phydev
->speed
== SPEED_100
)
326 reg
|= MACB_BIT(SPD
);
327 if (phydev
->speed
== SPEED_1000
&&
328 bp
->caps
& MACB_CAPS_GIGABIT_MODE_AVAILABLE
)
331 macb_or_gem_writel(bp
, NCFGR
, reg
);
333 bp
->speed
= phydev
->speed
;
334 bp
->duplex
= phydev
->duplex
;
339 if (phydev
->link
!= bp
->link
) {
344 bp
->link
= phydev
->link
;
349 spin_unlock_irqrestore(&bp
->lock
, flags
);
353 /* Update the TX clock rate if and only if the link is
354 * up and there has been a link change.
356 macb_set_tx_clk(bp
->tx_clk
, phydev
->speed
, dev
);
358 netif_carrier_on(dev
);
359 netdev_info(dev
, "link up (%d/%s)\n",
361 phydev
->duplex
== DUPLEX_FULL
?
364 netif_carrier_off(dev
);
365 netdev_info(dev
, "link down\n");
370 /* based on au1000_eth. c*/
371 static int macb_mii_probe(struct net_device
*dev
)
373 struct macb
*bp
= netdev_priv(dev
);
374 struct macb_platform_data
*pdata
;
375 struct phy_device
*phydev
;
379 phydev
= phy_find_first(bp
->mii_bus
);
381 netdev_err(dev
, "no PHY found\n");
385 pdata
= dev_get_platdata(&bp
->pdev
->dev
);
386 if (pdata
&& gpio_is_valid(pdata
->phy_irq_pin
)) {
387 ret
= devm_gpio_request(&bp
->pdev
->dev
, pdata
->phy_irq_pin
,
390 phy_irq
= gpio_to_irq(pdata
->phy_irq_pin
);
391 phydev
->irq
= (phy_irq
< 0) ? PHY_POLL
: phy_irq
;
395 /* attach the mac to the phy */
396 ret
= phy_connect_direct(dev
, phydev
, &macb_handle_link_change
,
399 netdev_err(dev
, "Could not attach to PHY\n");
403 /* mask with MAC supported features */
404 if (macb_is_gem(bp
) && bp
->caps
& MACB_CAPS_GIGABIT_MODE_AVAILABLE
)
405 phydev
->supported
&= PHY_GBIT_FEATURES
;
407 phydev
->supported
&= PHY_BASIC_FEATURES
;
409 if (bp
->caps
& MACB_CAPS_NO_GIGABIT_HALF
)
410 phydev
->supported
&= ~SUPPORTED_1000baseT_Half
;
412 phydev
->advertising
= phydev
->supported
;
417 bp
->phy_dev
= phydev
;
422 static int macb_mii_init(struct macb
*bp
)
424 struct macb_platform_data
*pdata
;
425 struct device_node
*np
;
428 /* Enable management port */
429 macb_writel(bp
, NCR
, MACB_BIT(MPE
));
431 bp
->mii_bus
= mdiobus_alloc();
437 bp
->mii_bus
->name
= "MACB_mii_bus";
438 bp
->mii_bus
->read
= &macb_mdio_read
;
439 bp
->mii_bus
->write
= &macb_mdio_write
;
440 snprintf(bp
->mii_bus
->id
, MII_BUS_ID_SIZE
, "%s-%x",
441 bp
->pdev
->name
, bp
->pdev
->id
);
442 bp
->mii_bus
->priv
= bp
;
443 bp
->mii_bus
->parent
= &bp
->pdev
->dev
;
444 pdata
= dev_get_platdata(&bp
->pdev
->dev
);
446 dev_set_drvdata(&bp
->dev
->dev
, bp
->mii_bus
);
448 np
= bp
->pdev
->dev
.of_node
;
450 /* try dt phy registration */
451 err
= of_mdiobus_register(bp
->mii_bus
, np
);
453 /* fallback to standard phy registration if no phy were
454 * found during dt phy registration
456 if (!err
&& !phy_find_first(bp
->mii_bus
)) {
457 for (i
= 0; i
< PHY_MAX_ADDR
; i
++) {
458 struct phy_device
*phydev
;
460 phydev
= mdiobus_scan(bp
->mii_bus
, i
);
461 if (IS_ERR(phydev
) &&
462 PTR_ERR(phydev
) != -ENODEV
) {
463 err
= PTR_ERR(phydev
);
469 goto err_out_unregister_bus
;
473 bp
->mii_bus
->phy_mask
= pdata
->phy_mask
;
475 err
= mdiobus_register(bp
->mii_bus
);
479 goto err_out_free_mdiobus
;
481 err
= macb_mii_probe(bp
->dev
);
483 goto err_out_unregister_bus
;
487 err_out_unregister_bus
:
488 mdiobus_unregister(bp
->mii_bus
);
489 err_out_free_mdiobus
:
490 mdiobus_free(bp
->mii_bus
);
495 static void macb_update_stats(struct macb
*bp
)
497 u32
*p
= &bp
->hw_stats
.macb
.rx_pause_frames
;
498 u32
*end
= &bp
->hw_stats
.macb
.tx_pause_frames
+ 1;
499 int offset
= MACB_PFR
;
501 WARN_ON((unsigned long)(end
- p
- 1) != (MACB_TPF
- MACB_PFR
) / 4);
503 for (; p
< end
; p
++, offset
+= 4)
504 *p
+= bp
->macb_reg_readl(bp
, offset
);
507 static int macb_halt_tx(struct macb
*bp
)
509 unsigned long halt_time
, timeout
;
512 macb_writel(bp
, NCR
, macb_readl(bp
, NCR
) | MACB_BIT(THALT
));
514 timeout
= jiffies
+ usecs_to_jiffies(MACB_HALT_TIMEOUT
);
517 status
= macb_readl(bp
, TSR
);
518 if (!(status
& MACB_BIT(TGO
)))
521 usleep_range(10, 250);
522 } while (time_before(halt_time
, timeout
));
527 static void macb_tx_unmap(struct macb
*bp
, struct macb_tx_skb
*tx_skb
)
529 if (tx_skb
->mapping
) {
530 if (tx_skb
->mapped_as_page
)
531 dma_unmap_page(&bp
->pdev
->dev
, tx_skb
->mapping
,
532 tx_skb
->size
, DMA_TO_DEVICE
);
534 dma_unmap_single(&bp
->pdev
->dev
, tx_skb
->mapping
,
535 tx_skb
->size
, DMA_TO_DEVICE
);
540 dev_kfree_skb_any(tx_skb
->skb
);
545 static void macb_tx_error_task(struct work_struct
*work
)
547 struct macb_queue
*queue
= container_of(work
, struct macb_queue
,
549 struct macb
*bp
= queue
->bp
;
550 struct macb_tx_skb
*tx_skb
;
551 struct macb_dma_desc
*desc
;
556 netdev_vdbg(bp
->dev
, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
557 (unsigned int)(queue
- bp
->queues
),
558 queue
->tx_tail
, queue
->tx_head
);
560 /* Prevent the queue IRQ handlers from running: each of them may call
561 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
562 * As explained below, we have to halt the transmission before updating
563 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
564 * network engine about the macb/gem being halted.
566 spin_lock_irqsave(&bp
->lock
, flags
);
568 /* Make sure nobody is trying to queue up new packets */
569 netif_tx_stop_all_queues(bp
->dev
);
571 /* Stop transmission now
572 * (in case we have just queued new packets)
573 * macb/gem must be halted to write TBQP register
575 if (macb_halt_tx(bp
))
576 /* Just complain for now, reinitializing TX path can be good */
577 netdev_err(bp
->dev
, "BUG: halt tx timed out\n");
579 /* Treat frames in TX queue including the ones that caused the error.
580 * Free transmit buffers in upper layer.
582 for (tail
= queue
->tx_tail
; tail
!= queue
->tx_head
; tail
++) {
585 desc
= macb_tx_desc(queue
, tail
);
587 tx_skb
= macb_tx_skb(queue
, tail
);
590 if (ctrl
& MACB_BIT(TX_USED
)) {
591 /* skb is set for the last buffer of the frame */
593 macb_tx_unmap(bp
, tx_skb
);
595 tx_skb
= macb_tx_skb(queue
, tail
);
599 /* ctrl still refers to the first buffer descriptor
600 * since it's the only one written back by the hardware
602 if (!(ctrl
& MACB_BIT(TX_BUF_EXHAUSTED
))) {
603 netdev_vdbg(bp
->dev
, "txerr skb %u (data %p) TX complete\n",
604 macb_tx_ring_wrap(tail
), skb
->data
);
605 bp
->stats
.tx_packets
++;
606 bp
->stats
.tx_bytes
+= skb
->len
;
609 /* "Buffers exhausted mid-frame" errors may only happen
610 * if the driver is buggy, so complain loudly about
611 * those. Statistics are updated by hardware.
613 if (ctrl
& MACB_BIT(TX_BUF_EXHAUSTED
))
615 "BUG: TX buffers exhausted mid-frame\n");
617 desc
->ctrl
= ctrl
| MACB_BIT(TX_USED
);
620 macb_tx_unmap(bp
, tx_skb
);
623 /* Set end of TX queue */
624 desc
= macb_tx_desc(queue
, 0);
626 desc
->ctrl
= MACB_BIT(TX_USED
);
628 /* Make descriptor updates visible to hardware */
631 /* Reinitialize the TX desc queue */
632 queue_writel(queue
, TBQP
, queue
->tx_ring_dma
);
633 /* Make TX ring reflect state of hardware */
637 /* Housework before enabling TX IRQ */
638 macb_writel(bp
, TSR
, macb_readl(bp
, TSR
));
639 queue_writel(queue
, IER
, MACB_TX_INT_FLAGS
);
641 /* Now we are ready to start transmission again */
642 netif_tx_start_all_queues(bp
->dev
);
643 macb_writel(bp
, NCR
, macb_readl(bp
, NCR
) | MACB_BIT(TSTART
));
645 spin_unlock_irqrestore(&bp
->lock
, flags
);
648 static void macb_tx_interrupt(struct macb_queue
*queue
)
653 struct macb
*bp
= queue
->bp
;
654 u16 queue_index
= queue
- bp
->queues
;
656 status
= macb_readl(bp
, TSR
);
657 macb_writel(bp
, TSR
, status
);
659 if (bp
->caps
& MACB_CAPS_ISR_CLEAR_ON_WRITE
)
660 queue_writel(queue
, ISR
, MACB_BIT(TCOMP
));
662 netdev_vdbg(bp
->dev
, "macb_tx_interrupt status = 0x%03lx\n",
663 (unsigned long)status
);
665 head
= queue
->tx_head
;
666 for (tail
= queue
->tx_tail
; tail
!= head
; tail
++) {
667 struct macb_tx_skb
*tx_skb
;
669 struct macb_dma_desc
*desc
;
672 desc
= macb_tx_desc(queue
, tail
);
674 /* Make hw descriptor updates visible to CPU */
679 /* TX_USED bit is only set by hardware on the very first buffer
680 * descriptor of the transmitted frame.
682 if (!(ctrl
& MACB_BIT(TX_USED
)))
685 /* Process all buffers of the current transmitted frame */
687 tx_skb
= macb_tx_skb(queue
, tail
);
690 /* First, update TX stats if needed */
692 netdev_vdbg(bp
->dev
, "skb %u (data %p) TX complete\n",
693 macb_tx_ring_wrap(tail
), skb
->data
);
694 bp
->stats
.tx_packets
++;
695 bp
->stats
.tx_bytes
+= skb
->len
;
698 /* Now we can safely release resources */
699 macb_tx_unmap(bp
, tx_skb
);
701 /* skb is set only for the last buffer of the frame.
702 * WARNING: at this point skb has been freed by
710 queue
->tx_tail
= tail
;
711 if (__netif_subqueue_stopped(bp
->dev
, queue_index
) &&
712 CIRC_CNT(queue
->tx_head
, queue
->tx_tail
,
713 TX_RING_SIZE
) <= MACB_TX_WAKEUP_THRESH
)
714 netif_wake_subqueue(bp
->dev
, queue_index
);
717 static void gem_rx_refill(struct macb
*bp
)
723 while (CIRC_SPACE(bp
->rx_prepared_head
, bp
->rx_tail
,
725 entry
= macb_rx_ring_wrap(bp
->rx_prepared_head
);
727 /* Make hw descriptor updates visible to CPU */
730 bp
->rx_prepared_head
++;
732 if (!bp
->rx_skbuff
[entry
]) {
733 /* allocate sk_buff for this free entry in ring */
734 skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buffer_size
);
735 if (unlikely(!skb
)) {
737 "Unable to allocate sk_buff\n");
741 /* now fill corresponding descriptor entry */
742 paddr
= dma_map_single(&bp
->pdev
->dev
, skb
->data
,
745 if (dma_mapping_error(&bp
->pdev
->dev
, paddr
)) {
750 bp
->rx_skbuff
[entry
] = skb
;
752 if (entry
== RX_RING_SIZE
- 1)
753 paddr
|= MACB_BIT(RX_WRAP
);
754 bp
->rx_ring
[entry
].addr
= paddr
;
755 bp
->rx_ring
[entry
].ctrl
= 0;
757 /* properly align Ethernet header */
758 skb_reserve(skb
, NET_IP_ALIGN
);
760 bp
->rx_ring
[entry
].addr
&= ~MACB_BIT(RX_USED
);
761 bp
->rx_ring
[entry
].ctrl
= 0;
765 /* Make descriptor updates visible to hardware */
768 netdev_vdbg(bp
->dev
, "rx ring: prepared head %d, tail %d\n",
769 bp
->rx_prepared_head
, bp
->rx_tail
);
772 /* Mark DMA descriptors from begin up to and not including end as unused */
773 static void discard_partial_frame(struct macb
*bp
, unsigned int begin
,
778 for (frag
= begin
; frag
!= end
; frag
++) {
779 struct macb_dma_desc
*desc
= macb_rx_desc(bp
, frag
);
781 desc
->addr
&= ~MACB_BIT(RX_USED
);
784 /* Make descriptor updates visible to hardware */
787 /* When this happens, the hardware stats registers for
788 * whatever caused this is updated, so we don't have to record
793 static int gem_rx(struct macb
*bp
, int budget
)
798 struct macb_dma_desc
*desc
;
801 while (count
< budget
) {
804 entry
= macb_rx_ring_wrap(bp
->rx_tail
);
805 desc
= &bp
->rx_ring
[entry
];
807 /* Make hw descriptor updates visible to CPU */
813 if (!(addr
& MACB_BIT(RX_USED
)))
819 if (!(ctrl
& MACB_BIT(RX_SOF
) && ctrl
& MACB_BIT(RX_EOF
))) {
821 "not whole frame pointed by descriptor\n");
822 bp
->stats
.rx_dropped
++;
825 skb
= bp
->rx_skbuff
[entry
];
826 if (unlikely(!skb
)) {
828 "inconsistent Rx descriptor chain\n");
829 bp
->stats
.rx_dropped
++;
832 /* now everything is ready for receiving packet */
833 bp
->rx_skbuff
[entry
] = NULL
;
834 len
= ctrl
& bp
->rx_frm_len_mask
;
836 netdev_vdbg(bp
->dev
, "gem_rx %u (len %u)\n", entry
, len
);
839 addr
= MACB_BF(RX_WADDR
, MACB_BFEXT(RX_WADDR
, addr
));
840 dma_unmap_single(&bp
->pdev
->dev
, addr
,
841 bp
->rx_buffer_size
, DMA_FROM_DEVICE
);
843 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
844 skb_checksum_none_assert(skb
);
845 if (bp
->dev
->features
& NETIF_F_RXCSUM
&&
846 !(bp
->dev
->flags
& IFF_PROMISC
) &&
847 GEM_BFEXT(RX_CSUM
, ctrl
) & GEM_RX_CSUM_CHECKED_MASK
)
848 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
850 bp
->stats
.rx_packets
++;
851 bp
->stats
.rx_bytes
+= skb
->len
;
853 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
854 netdev_vdbg(bp
->dev
, "received skb of length %u, csum: %08x\n",
855 skb
->len
, skb
->csum
);
856 print_hex_dump(KERN_DEBUG
, " mac: ", DUMP_PREFIX_ADDRESS
, 16, 1,
857 skb_mac_header(skb
), 16, true);
858 print_hex_dump(KERN_DEBUG
, "data: ", DUMP_PREFIX_ADDRESS
, 16, 1,
859 skb
->data
, 32, true);
862 netif_receive_skb(skb
);
870 static int macb_rx_frame(struct macb
*bp
, unsigned int first_frag
,
871 unsigned int last_frag
)
877 struct macb_dma_desc
*desc
;
879 desc
= macb_rx_desc(bp
, last_frag
);
880 len
= desc
->ctrl
& bp
->rx_frm_len_mask
;
882 netdev_vdbg(bp
->dev
, "macb_rx_frame frags %u - %u (len %u)\n",
883 macb_rx_ring_wrap(first_frag
),
884 macb_rx_ring_wrap(last_frag
), len
);
886 /* The ethernet header starts NET_IP_ALIGN bytes into the
887 * first buffer. Since the header is 14 bytes, this makes the
888 * payload word-aligned.
890 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
891 * the two padding bytes into the skb so that we avoid hitting
892 * the slowpath in memcpy(), and pull them off afterwards.
894 skb
= netdev_alloc_skb(bp
->dev
, len
+ NET_IP_ALIGN
);
896 bp
->stats
.rx_dropped
++;
897 for (frag
= first_frag
; ; frag
++) {
898 desc
= macb_rx_desc(bp
, frag
);
899 desc
->addr
&= ~MACB_BIT(RX_USED
);
900 if (frag
== last_frag
)
904 /* Make descriptor updates visible to hardware */
912 skb_checksum_none_assert(skb
);
915 for (frag
= first_frag
; ; frag
++) {
916 unsigned int frag_len
= bp
->rx_buffer_size
;
918 if (offset
+ frag_len
> len
) {
919 if (unlikely(frag
!= last_frag
)) {
920 dev_kfree_skb_any(skb
);
923 frag_len
= len
- offset
;
925 skb_copy_to_linear_data_offset(skb
, offset
,
926 macb_rx_buffer(bp
, frag
),
928 offset
+= bp
->rx_buffer_size
;
929 desc
= macb_rx_desc(bp
, frag
);
930 desc
->addr
&= ~MACB_BIT(RX_USED
);
932 if (frag
== last_frag
)
936 /* Make descriptor updates visible to hardware */
939 __skb_pull(skb
, NET_IP_ALIGN
);
940 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
942 bp
->stats
.rx_packets
++;
943 bp
->stats
.rx_bytes
+= skb
->len
;
944 netdev_vdbg(bp
->dev
, "received skb of length %u, csum: %08x\n",
945 skb
->len
, skb
->csum
);
946 netif_receive_skb(skb
);
951 static inline void macb_init_rx_ring(struct macb
*bp
)
956 addr
= bp
->rx_buffers_dma
;
957 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
958 bp
->rx_ring
[i
].addr
= addr
;
959 bp
->rx_ring
[i
].ctrl
= 0;
960 addr
+= bp
->rx_buffer_size
;
962 bp
->rx_ring
[RX_RING_SIZE
- 1].addr
|= MACB_BIT(RX_WRAP
);
965 static int macb_rx(struct macb
*bp
, int budget
)
967 bool reset_rx_queue
= false;
972 for (tail
= bp
->rx_tail
; budget
> 0; tail
++) {
973 struct macb_dma_desc
*desc
= macb_rx_desc(bp
, tail
);
976 /* Make hw descriptor updates visible to CPU */
982 if (!(addr
& MACB_BIT(RX_USED
)))
985 if (ctrl
& MACB_BIT(RX_SOF
)) {
986 if (first_frag
!= -1)
987 discard_partial_frame(bp
, first_frag
, tail
);
991 if (ctrl
& MACB_BIT(RX_EOF
)) {
994 if (unlikely(first_frag
== -1)) {
995 reset_rx_queue
= true;
999 dropped
= macb_rx_frame(bp
, first_frag
, tail
);
1001 if (unlikely(dropped
< 0)) {
1002 reset_rx_queue
= true;
1012 if (unlikely(reset_rx_queue
)) {
1013 unsigned long flags
;
1016 netdev_err(bp
->dev
, "RX queue corruption: reset it\n");
1018 spin_lock_irqsave(&bp
->lock
, flags
);
1020 ctrl
= macb_readl(bp
, NCR
);
1021 macb_writel(bp
, NCR
, ctrl
& ~MACB_BIT(RE
));
1023 macb_init_rx_ring(bp
);
1024 macb_writel(bp
, RBQP
, bp
->rx_ring_dma
);
1026 macb_writel(bp
, NCR
, ctrl
| MACB_BIT(RE
));
1028 spin_unlock_irqrestore(&bp
->lock
, flags
);
1032 if (first_frag
!= -1)
1033 bp
->rx_tail
= first_frag
;
1040 static int macb_poll(struct napi_struct
*napi
, int budget
)
1042 struct macb
*bp
= container_of(napi
, struct macb
, napi
);
1046 status
= macb_readl(bp
, RSR
);
1047 macb_writel(bp
, RSR
, status
);
1051 netdev_vdbg(bp
->dev
, "poll: status = %08lx, budget = %d\n",
1052 (unsigned long)status
, budget
);
1054 work_done
= bp
->macbgem_ops
.mog_rx(bp
, budget
);
1055 if (work_done
< budget
) {
1056 napi_complete(napi
);
1058 /* Packets received while interrupts were disabled */
1059 status
= macb_readl(bp
, RSR
);
1061 if (bp
->caps
& MACB_CAPS_ISR_CLEAR_ON_WRITE
)
1062 macb_writel(bp
, ISR
, MACB_BIT(RCOMP
));
1063 napi_reschedule(napi
);
1065 macb_writel(bp
, IER
, MACB_RX_INT_FLAGS
);
1069 /* TODO: Handle errors */
1074 static irqreturn_t
macb_interrupt(int irq
, void *dev_id
)
1076 struct macb_queue
*queue
= dev_id
;
1077 struct macb
*bp
= queue
->bp
;
1078 struct net_device
*dev
= bp
->dev
;
1081 status
= queue_readl(queue
, ISR
);
1083 if (unlikely(!status
))
1086 spin_lock(&bp
->lock
);
1089 /* close possible race with dev_close */
1090 if (unlikely(!netif_running(dev
))) {
1091 queue_writel(queue
, IDR
, -1);
1092 if (bp
->caps
& MACB_CAPS_ISR_CLEAR_ON_WRITE
)
1093 queue_writel(queue
, ISR
, -1);
1097 netdev_vdbg(bp
->dev
, "queue = %u, isr = 0x%08lx\n",
1098 (unsigned int)(queue
- bp
->queues
),
1099 (unsigned long)status
);
1101 if (status
& MACB_RX_INT_FLAGS
) {
1102 /* There's no point taking any more interrupts
1103 * until we have processed the buffers. The
1104 * scheduling call may fail if the poll routine
1105 * is already scheduled, so disable interrupts
1108 queue_writel(queue
, IDR
, MACB_RX_INT_FLAGS
);
1109 if (bp
->caps
& MACB_CAPS_ISR_CLEAR_ON_WRITE
)
1110 queue_writel(queue
, ISR
, MACB_BIT(RCOMP
));
1112 if (napi_schedule_prep(&bp
->napi
)) {
1113 netdev_vdbg(bp
->dev
, "scheduling RX softirq\n");
1114 __napi_schedule(&bp
->napi
);
1118 if (unlikely(status
& (MACB_TX_ERR_FLAGS
))) {
1119 queue_writel(queue
, IDR
, MACB_TX_INT_FLAGS
);
1120 schedule_work(&queue
->tx_error_task
);
1122 if (bp
->caps
& MACB_CAPS_ISR_CLEAR_ON_WRITE
)
1123 queue_writel(queue
, ISR
, MACB_TX_ERR_FLAGS
);
1128 if (status
& MACB_BIT(TCOMP
))
1129 macb_tx_interrupt(queue
);
1131 /* Link change detection isn't possible with RMII, so we'll
1132 * add that if/when we get our hands on a full-blown MII PHY.
1135 /* There is a hardware issue under heavy load where DMA can
1136 * stop, this causes endless "used buffer descriptor read"
1137 * interrupts but it can be cleared by re-enabling RX. See
1138 * the at91 manual, section 41.3.1 or the Zynq manual
1139 * section 16.7.4 for details.
1141 if (status
& MACB_BIT(RXUBR
)) {
1142 ctrl
= macb_readl(bp
, NCR
);
1143 macb_writel(bp
, NCR
, ctrl
& ~MACB_BIT(RE
));
1144 macb_writel(bp
, NCR
, ctrl
| MACB_BIT(RE
));
1146 if (bp
->caps
& MACB_CAPS_ISR_CLEAR_ON_WRITE
)
1147 queue_writel(queue
, ISR
, MACB_BIT(RXUBR
));
1150 if (status
& MACB_BIT(ISR_ROVR
)) {
1151 /* We missed at least one packet */
1152 if (macb_is_gem(bp
))
1153 bp
->hw_stats
.gem
.rx_overruns
++;
1155 bp
->hw_stats
.macb
.rx_overruns
++;
1157 if (bp
->caps
& MACB_CAPS_ISR_CLEAR_ON_WRITE
)
1158 queue_writel(queue
, ISR
, MACB_BIT(ISR_ROVR
));
1161 if (status
& MACB_BIT(HRESP
)) {
1162 /* TODO: Reset the hardware, and maybe move the
1163 * netdev_err to a lower-priority context as well
1166 netdev_err(dev
, "DMA bus error: HRESP not OK\n");
1168 if (bp
->caps
& MACB_CAPS_ISR_CLEAR_ON_WRITE
)
1169 queue_writel(queue
, ISR
, MACB_BIT(HRESP
));
1172 status
= queue_readl(queue
, ISR
);
1175 spin_unlock(&bp
->lock
);
1180 #ifdef CONFIG_NET_POLL_CONTROLLER
1181 /* Polling receive - used by netconsole and other diagnostic tools
1182 * to allow network i/o with interrupts disabled.
1184 static void macb_poll_controller(struct net_device
*dev
)
1186 struct macb
*bp
= netdev_priv(dev
);
1187 struct macb_queue
*queue
;
1188 unsigned long flags
;
1191 local_irq_save(flags
);
1192 for (q
= 0, queue
= bp
->queues
; q
< bp
->num_queues
; ++q
, ++queue
)
1193 macb_interrupt(dev
->irq
, queue
);
1194 local_irq_restore(flags
);
1198 static unsigned int macb_tx_map(struct macb
*bp
,
1199 struct macb_queue
*queue
,
1200 struct sk_buff
*skb
)
1203 unsigned int len
, entry
, i
, tx_head
= queue
->tx_head
;
1204 struct macb_tx_skb
*tx_skb
= NULL
;
1205 struct macb_dma_desc
*desc
;
1206 unsigned int offset
, size
, count
= 0;
1207 unsigned int f
, nr_frags
= skb_shinfo(skb
)->nr_frags
;
1208 unsigned int eof
= 1;
1211 /* First, map non-paged data */
1212 len
= skb_headlen(skb
);
1215 size
= min(len
, bp
->max_tx_length
);
1216 entry
= macb_tx_ring_wrap(tx_head
);
1217 tx_skb
= &queue
->tx_skb
[entry
];
1219 mapping
= dma_map_single(&bp
->pdev
->dev
,
1221 size
, DMA_TO_DEVICE
);
1222 if (dma_mapping_error(&bp
->pdev
->dev
, mapping
))
1225 /* Save info to properly release resources */
1227 tx_skb
->mapping
= mapping
;
1228 tx_skb
->size
= size
;
1229 tx_skb
->mapped_as_page
= false;
1237 /* Then, map paged data from fragments */
1238 for (f
= 0; f
< nr_frags
; f
++) {
1239 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[f
];
1241 len
= skb_frag_size(frag
);
1244 size
= min(len
, bp
->max_tx_length
);
1245 entry
= macb_tx_ring_wrap(tx_head
);
1246 tx_skb
= &queue
->tx_skb
[entry
];
1248 mapping
= skb_frag_dma_map(&bp
->pdev
->dev
, frag
,
1249 offset
, size
, DMA_TO_DEVICE
);
1250 if (dma_mapping_error(&bp
->pdev
->dev
, mapping
))
1253 /* Save info to properly release resources */
1255 tx_skb
->mapping
= mapping
;
1256 tx_skb
->size
= size
;
1257 tx_skb
->mapped_as_page
= true;
1266 /* Should never happen */
1267 if (unlikely(!tx_skb
)) {
1268 netdev_err(bp
->dev
, "BUG! empty skb!\n");
1272 /* This is the last buffer of the frame: save socket buffer */
1275 /* Update TX ring: update buffer descriptors in reverse order
1276 * to avoid race condition
1279 /* Set 'TX_USED' bit in buffer descriptor at tx_head position
1280 * to set the end of TX queue
1283 entry
= macb_tx_ring_wrap(i
);
1284 ctrl
= MACB_BIT(TX_USED
);
1285 desc
= &queue
->tx_ring
[entry
];
1290 entry
= macb_tx_ring_wrap(i
);
1291 tx_skb
= &queue
->tx_skb
[entry
];
1292 desc
= &queue
->tx_ring
[entry
];
1294 ctrl
= (u32
)tx_skb
->size
;
1296 ctrl
|= MACB_BIT(TX_LAST
);
1299 if (unlikely(entry
== (TX_RING_SIZE
- 1)))
1300 ctrl
|= MACB_BIT(TX_WRAP
);
1302 /* Set TX buffer descriptor */
1303 desc
->addr
= tx_skb
->mapping
;
1304 /* desc->addr must be visible to hardware before clearing
1305 * 'TX_USED' bit in desc->ctrl.
1309 } while (i
!= queue
->tx_head
);
1311 queue
->tx_head
= tx_head
;
1316 netdev_err(bp
->dev
, "TX DMA map failed\n");
1318 for (i
= queue
->tx_head
; i
!= tx_head
; i
++) {
1319 tx_skb
= macb_tx_skb(queue
, i
);
1321 macb_tx_unmap(bp
, tx_skb
);
1327 static int macb_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1329 u16 queue_index
= skb_get_queue_mapping(skb
);
1330 struct macb
*bp
= netdev_priv(dev
);
1331 struct macb_queue
*queue
= &bp
->queues
[queue_index
];
1332 unsigned long flags
;
1333 unsigned int count
, nr_frags
, frag_size
, f
;
1335 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1336 netdev_vdbg(bp
->dev
,
1337 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1338 queue_index
, skb
->len
, skb
->head
, skb
->data
,
1339 skb_tail_pointer(skb
), skb_end_pointer(skb
));
1340 print_hex_dump(KERN_DEBUG
, "data: ", DUMP_PREFIX_OFFSET
, 16, 1,
1341 skb
->data
, 16, true);
1344 /* Count how many TX buffer descriptors are needed to send this
1345 * socket buffer: skb fragments of jumbo frames may need to be
1346 * split into many buffer descriptors.
1348 count
= DIV_ROUND_UP(skb_headlen(skb
), bp
->max_tx_length
);
1349 nr_frags
= skb_shinfo(skb
)->nr_frags
;
1350 for (f
= 0; f
< nr_frags
; f
++) {
1351 frag_size
= skb_frag_size(&skb_shinfo(skb
)->frags
[f
]);
1352 count
+= DIV_ROUND_UP(frag_size
, bp
->max_tx_length
);
1355 spin_lock_irqsave(&bp
->lock
, flags
);
1357 /* This is a hard error, log it. */
1358 if (CIRC_SPACE(queue
->tx_head
, queue
->tx_tail
, TX_RING_SIZE
) < count
) {
1359 netif_stop_subqueue(dev
, queue_index
);
1360 spin_unlock_irqrestore(&bp
->lock
, flags
);
1361 netdev_dbg(bp
->dev
, "tx_head = %u, tx_tail = %u\n",
1362 queue
->tx_head
, queue
->tx_tail
);
1363 return NETDEV_TX_BUSY
;
1366 /* Map socket buffer for DMA transfer */
1367 if (!macb_tx_map(bp
, queue
, skb
)) {
1368 dev_kfree_skb_any(skb
);
1372 /* Make newly initialized descriptor visible to hardware */
1375 skb_tx_timestamp(skb
);
1377 macb_writel(bp
, NCR
, macb_readl(bp
, NCR
) | MACB_BIT(TSTART
));
1379 if (CIRC_SPACE(queue
->tx_head
, queue
->tx_tail
, TX_RING_SIZE
) < 1)
1380 netif_stop_subqueue(dev
, queue_index
);
1383 spin_unlock_irqrestore(&bp
->lock
, flags
);
1385 return NETDEV_TX_OK
;
1388 static void macb_init_rx_buffer_size(struct macb
*bp
, size_t size
)
1390 if (!macb_is_gem(bp
)) {
1391 bp
->rx_buffer_size
= MACB_RX_BUFFER_SIZE
;
1393 bp
->rx_buffer_size
= size
;
1395 if (bp
->rx_buffer_size
% RX_BUFFER_MULTIPLE
) {
1397 "RX buffer must be multiple of %d bytes, expanding\n",
1398 RX_BUFFER_MULTIPLE
);
1399 bp
->rx_buffer_size
=
1400 roundup(bp
->rx_buffer_size
, RX_BUFFER_MULTIPLE
);
1404 netdev_dbg(bp
->dev
, "mtu [%u] rx_buffer_size [%Zu]\n",
1405 bp
->dev
->mtu
, bp
->rx_buffer_size
);
1408 static void gem_free_rx_buffers(struct macb
*bp
)
1410 struct sk_buff
*skb
;
1411 struct macb_dma_desc
*desc
;
1418 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
1419 skb
= bp
->rx_skbuff
[i
];
1424 desc
= &bp
->rx_ring
[i
];
1425 addr
= MACB_BF(RX_WADDR
, MACB_BFEXT(RX_WADDR
, desc
->addr
));
1426 dma_unmap_single(&bp
->pdev
->dev
, addr
, bp
->rx_buffer_size
,
1428 dev_kfree_skb_any(skb
);
1432 kfree(bp
->rx_skbuff
);
1433 bp
->rx_skbuff
= NULL
;
1436 static void macb_free_rx_buffers(struct macb
*bp
)
1438 if (bp
->rx_buffers
) {
1439 dma_free_coherent(&bp
->pdev
->dev
,
1440 RX_RING_SIZE
* bp
->rx_buffer_size
,
1441 bp
->rx_buffers
, bp
->rx_buffers_dma
);
1442 bp
->rx_buffers
= NULL
;
1446 static void macb_free_consistent(struct macb
*bp
)
1448 struct macb_queue
*queue
;
1451 bp
->macbgem_ops
.mog_free_rx_buffers(bp
);
1453 dma_free_coherent(&bp
->pdev
->dev
, RX_RING_BYTES
,
1454 bp
->rx_ring
, bp
->rx_ring_dma
);
1458 for (q
= 0, queue
= bp
->queues
; q
< bp
->num_queues
; ++q
, ++queue
) {
1459 kfree(queue
->tx_skb
);
1460 queue
->tx_skb
= NULL
;
1461 if (queue
->tx_ring
) {
1462 dma_free_coherent(&bp
->pdev
->dev
, TX_RING_BYTES
,
1463 queue
->tx_ring
, queue
->tx_ring_dma
);
1464 queue
->tx_ring
= NULL
;
1469 static int gem_alloc_rx_buffers(struct macb
*bp
)
1473 size
= RX_RING_SIZE
* sizeof(struct sk_buff
*);
1474 bp
->rx_skbuff
= kzalloc(size
, GFP_KERNEL
);
1479 "Allocated %d RX struct sk_buff entries at %p\n",
1480 RX_RING_SIZE
, bp
->rx_skbuff
);
1484 static int macb_alloc_rx_buffers(struct macb
*bp
)
1488 size
= RX_RING_SIZE
* bp
->rx_buffer_size
;
1489 bp
->rx_buffers
= dma_alloc_coherent(&bp
->pdev
->dev
, size
,
1490 &bp
->rx_buffers_dma
, GFP_KERNEL
);
1491 if (!bp
->rx_buffers
)
1495 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1496 size
, (unsigned long)bp
->rx_buffers_dma
, bp
->rx_buffers
);
1500 static int macb_alloc_consistent(struct macb
*bp
)
1502 struct macb_queue
*queue
;
1506 for (q
= 0, queue
= bp
->queues
; q
< bp
->num_queues
; ++q
, ++queue
) {
1507 size
= TX_RING_BYTES
;
1508 queue
->tx_ring
= dma_alloc_coherent(&bp
->pdev
->dev
, size
,
1509 &queue
->tx_ring_dma
,
1511 if (!queue
->tx_ring
)
1514 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
1515 q
, size
, (unsigned long)queue
->tx_ring_dma
,
1518 size
= TX_RING_SIZE
* sizeof(struct macb_tx_skb
);
1519 queue
->tx_skb
= kmalloc(size
, GFP_KERNEL
);
1524 size
= RX_RING_BYTES
;
1525 bp
->rx_ring
= dma_alloc_coherent(&bp
->pdev
->dev
, size
,
1526 &bp
->rx_ring_dma
, GFP_KERNEL
);
1530 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1531 size
, (unsigned long)bp
->rx_ring_dma
, bp
->rx_ring
);
1533 if (bp
->macbgem_ops
.mog_alloc_rx_buffers(bp
))
1539 macb_free_consistent(bp
);
1543 static void gem_init_rings(struct macb
*bp
)
1545 struct macb_queue
*queue
;
1549 for (q
= 0, queue
= bp
->queues
; q
< bp
->num_queues
; ++q
, ++queue
) {
1550 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
1551 queue
->tx_ring
[i
].addr
= 0;
1552 queue
->tx_ring
[i
].ctrl
= MACB_BIT(TX_USED
);
1554 queue
->tx_ring
[TX_RING_SIZE
- 1].ctrl
|= MACB_BIT(TX_WRAP
);
1560 bp
->rx_prepared_head
= 0;
1565 static void macb_init_rings(struct macb
*bp
)
1569 macb_init_rx_ring(bp
);
1571 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
1572 bp
->queues
[0].tx_ring
[i
].addr
= 0;
1573 bp
->queues
[0].tx_ring
[i
].ctrl
= MACB_BIT(TX_USED
);
1575 bp
->queues
[0].tx_head
= 0;
1576 bp
->queues
[0].tx_tail
= 0;
1577 bp
->queues
[0].tx_ring
[TX_RING_SIZE
- 1].ctrl
|= MACB_BIT(TX_WRAP
);
1582 static void macb_reset_hw(struct macb
*bp
)
1584 struct macb_queue
*queue
;
1587 /* Disable RX and TX (XXX: Should we halt the transmission
1590 macb_writel(bp
, NCR
, 0);
1592 /* Clear the stats registers (XXX: Update stats first?) */
1593 macb_writel(bp
, NCR
, MACB_BIT(CLRSTAT
));
1595 /* Clear all status flags */
1596 macb_writel(bp
, TSR
, -1);
1597 macb_writel(bp
, RSR
, -1);
1599 /* Disable all interrupts */
1600 for (q
= 0, queue
= bp
->queues
; q
< bp
->num_queues
; ++q
, ++queue
) {
1601 queue_writel(queue
, IDR
, -1);
1602 queue_readl(queue
, ISR
);
1603 if (bp
->caps
& MACB_CAPS_ISR_CLEAR_ON_WRITE
)
1604 queue_writel(queue
, ISR
, -1);
1608 static u32
gem_mdc_clk_div(struct macb
*bp
)
1611 unsigned long pclk_hz
= clk_get_rate(bp
->pclk
);
1613 if (pclk_hz
<= 20000000)
1614 config
= GEM_BF(CLK
, GEM_CLK_DIV8
);
1615 else if (pclk_hz
<= 40000000)
1616 config
= GEM_BF(CLK
, GEM_CLK_DIV16
);
1617 else if (pclk_hz
<= 80000000)
1618 config
= GEM_BF(CLK
, GEM_CLK_DIV32
);
1619 else if (pclk_hz
<= 120000000)
1620 config
= GEM_BF(CLK
, GEM_CLK_DIV48
);
1621 else if (pclk_hz
<= 160000000)
1622 config
= GEM_BF(CLK
, GEM_CLK_DIV64
);
1624 config
= GEM_BF(CLK
, GEM_CLK_DIV96
);
1629 static u32
macb_mdc_clk_div(struct macb
*bp
)
1632 unsigned long pclk_hz
;
1634 if (macb_is_gem(bp
))
1635 return gem_mdc_clk_div(bp
);
1637 pclk_hz
= clk_get_rate(bp
->pclk
);
1638 if (pclk_hz
<= 20000000)
1639 config
= MACB_BF(CLK
, MACB_CLK_DIV8
);
1640 else if (pclk_hz
<= 40000000)
1641 config
= MACB_BF(CLK
, MACB_CLK_DIV16
);
1642 else if (pclk_hz
<= 80000000)
1643 config
= MACB_BF(CLK
, MACB_CLK_DIV32
);
1645 config
= MACB_BF(CLK
, MACB_CLK_DIV64
);
1650 /* Get the DMA bus width field of the network configuration register that we
1651 * should program. We find the width from decoding the design configuration
1652 * register to find the maximum supported data bus width.
1654 static u32
macb_dbw(struct macb
*bp
)
1656 if (!macb_is_gem(bp
))
1659 switch (GEM_BFEXT(DBWDEF
, gem_readl(bp
, DCFG1
))) {
1661 return GEM_BF(DBW
, GEM_DBW128
);
1663 return GEM_BF(DBW
, GEM_DBW64
);
1666 return GEM_BF(DBW
, GEM_DBW32
);
1670 /* Configure the receive DMA engine
1671 * - use the correct receive buffer size
1672 * - set best burst length for DMA operations
1673 * (if not supported by FIFO, it will fallback to default)
1674 * - set both rx/tx packet buffers to full memory size
1675 * These are configurable parameters for GEM.
1677 static void macb_configure_dma(struct macb
*bp
)
1681 if (macb_is_gem(bp
)) {
1682 dmacfg
= gem_readl(bp
, DMACFG
) & ~GEM_BF(RXBS
, -1L);
1683 dmacfg
|= GEM_BF(RXBS
, bp
->rx_buffer_size
/ RX_BUFFER_MULTIPLE
);
1684 if (bp
->dma_burst_length
)
1685 dmacfg
= GEM_BFINS(FBLDO
, bp
->dma_burst_length
, dmacfg
);
1686 dmacfg
|= GEM_BIT(TXPBMS
) | GEM_BF(RXBMS
, -1L);
1687 dmacfg
&= ~GEM_BIT(ENDIA_PKT
);
1690 dmacfg
&= ~GEM_BIT(ENDIA_DESC
);
1692 dmacfg
|= GEM_BIT(ENDIA_DESC
); /* CPU in big endian */
1694 if (bp
->dev
->features
& NETIF_F_HW_CSUM
)
1695 dmacfg
|= GEM_BIT(TXCOEN
);
1697 dmacfg
&= ~GEM_BIT(TXCOEN
);
1698 netdev_dbg(bp
->dev
, "Cadence configure DMA with 0x%08x\n",
1700 gem_writel(bp
, DMACFG
, dmacfg
);
1704 static void macb_init_hw(struct macb
*bp
)
1706 struct macb_queue
*queue
;
1712 macb_set_hwaddr(bp
);
1714 config
= macb_mdc_clk_div(bp
);
1715 if (bp
->phy_interface
== PHY_INTERFACE_MODE_SGMII
)
1716 config
|= GEM_BIT(SGMIIEN
) | GEM_BIT(PCSSEL
);
1717 config
|= MACB_BF(RBOF
, NET_IP_ALIGN
); /* Make eth data aligned */
1718 config
|= MACB_BIT(PAE
); /* PAuse Enable */
1719 config
|= MACB_BIT(DRFCS
); /* Discard Rx FCS */
1720 if (bp
->caps
& MACB_CAPS_JUMBO
)
1721 config
|= MACB_BIT(JFRAME
); /* Enable jumbo frames */
1723 config
|= MACB_BIT(BIG
); /* Receive oversized frames */
1724 if (bp
->dev
->flags
& IFF_PROMISC
)
1725 config
|= MACB_BIT(CAF
); /* Copy All Frames */
1726 else if (macb_is_gem(bp
) && bp
->dev
->features
& NETIF_F_RXCSUM
)
1727 config
|= GEM_BIT(RXCOEN
);
1728 if (!(bp
->dev
->flags
& IFF_BROADCAST
))
1729 config
|= MACB_BIT(NBC
); /* No BroadCast */
1730 config
|= macb_dbw(bp
);
1731 macb_writel(bp
, NCFGR
, config
);
1732 if ((bp
->caps
& MACB_CAPS_JUMBO
) && bp
->jumbo_max_len
)
1733 gem_writel(bp
, JML
, bp
->jumbo_max_len
);
1734 bp
->speed
= SPEED_10
;
1735 bp
->duplex
= DUPLEX_HALF
;
1736 bp
->rx_frm_len_mask
= MACB_RX_FRMLEN_MASK
;
1737 if (bp
->caps
& MACB_CAPS_JUMBO
)
1738 bp
->rx_frm_len_mask
= MACB_RX_JFRMLEN_MASK
;
1740 macb_configure_dma(bp
);
1742 /* Initialize TX and RX buffers */
1743 macb_writel(bp
, RBQP
, bp
->rx_ring_dma
);
1744 for (q
= 0, queue
= bp
->queues
; q
< bp
->num_queues
; ++q
, ++queue
) {
1745 queue_writel(queue
, TBQP
, queue
->tx_ring_dma
);
1747 /* Enable interrupts */
1748 queue_writel(queue
, IER
,
1754 /* Enable TX and RX */
1755 macb_writel(bp
, NCR
, MACB_BIT(RE
) | MACB_BIT(TE
) | MACB_BIT(MPE
));
1758 /* The hash address register is 64 bits long and takes up two
1759 * locations in the memory map. The least significant bits are stored
1760 * in EMAC_HSL and the most significant bits in EMAC_HSH.
1762 * The unicast hash enable and the multicast hash enable bits in the
1763 * network configuration register enable the reception of hash matched
1764 * frames. The destination address is reduced to a 6 bit index into
1765 * the 64 bit hash register using the following hash function. The
1766 * hash function is an exclusive or of every sixth bit of the
1767 * destination address.
1769 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
1770 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
1771 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
1772 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
1773 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
1774 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
1776 * da[0] represents the least significant bit of the first byte
1777 * received, that is, the multicast/unicast indicator, and da[47]
1778 * represents the most significant bit of the last byte received. If
1779 * the hash index, hi[n], points to a bit that is set in the hash
1780 * register then the frame will be matched according to whether the
1781 * frame is multicast or unicast. A multicast match will be signalled
1782 * if the multicast hash enable bit is set, da[0] is 1 and the hash
1783 * index points to a bit set in the hash register. A unicast match
1784 * will be signalled if the unicast hash enable bit is set, da[0] is 0
1785 * and the hash index points to a bit set in the hash register. To
1786 * receive all multicast frames, the hash register should be set with
1787 * all ones and the multicast hash enable bit should be set in the
1788 * network configuration register.
1791 static inline int hash_bit_value(int bitnr
, __u8
*addr
)
1793 if (addr
[bitnr
/ 8] & (1 << (bitnr
% 8)))
1798 /* Return the hash index value for the specified address. */
1799 static int hash_get_index(__u8
*addr
)
1804 for (j
= 0; j
< 6; j
++) {
1805 for (i
= 0, bitval
= 0; i
< 8; i
++)
1806 bitval
^= hash_bit_value(i
* 6 + j
, addr
);
1808 hash_index
|= (bitval
<< j
);
1814 /* Add multicast addresses to the internal multicast-hash table. */
1815 static void macb_sethashtable(struct net_device
*dev
)
1817 struct netdev_hw_addr
*ha
;
1818 unsigned long mc_filter
[2];
1820 struct macb
*bp
= netdev_priv(dev
);
1825 netdev_for_each_mc_addr(ha
, dev
) {
1826 bitnr
= hash_get_index(ha
->addr
);
1827 mc_filter
[bitnr
>> 5] |= 1 << (bitnr
& 31);
1830 macb_or_gem_writel(bp
, HRB
, mc_filter
[0]);
1831 macb_or_gem_writel(bp
, HRT
, mc_filter
[1]);
1834 /* Enable/Disable promiscuous and multicast modes. */
1835 static void macb_set_rx_mode(struct net_device
*dev
)
1838 struct macb
*bp
= netdev_priv(dev
);
1840 cfg
= macb_readl(bp
, NCFGR
);
1842 if (dev
->flags
& IFF_PROMISC
) {
1843 /* Enable promiscuous mode */
1844 cfg
|= MACB_BIT(CAF
);
1846 /* Disable RX checksum offload */
1847 if (macb_is_gem(bp
))
1848 cfg
&= ~GEM_BIT(RXCOEN
);
1850 /* Disable promiscuous mode */
1851 cfg
&= ~MACB_BIT(CAF
);
1853 /* Enable RX checksum offload only if requested */
1854 if (macb_is_gem(bp
) && dev
->features
& NETIF_F_RXCSUM
)
1855 cfg
|= GEM_BIT(RXCOEN
);
1858 if (dev
->flags
& IFF_ALLMULTI
) {
1859 /* Enable all multicast mode */
1860 macb_or_gem_writel(bp
, HRB
, -1);
1861 macb_or_gem_writel(bp
, HRT
, -1);
1862 cfg
|= MACB_BIT(NCFGR_MTI
);
1863 } else if (!netdev_mc_empty(dev
)) {
1864 /* Enable specific multicasts */
1865 macb_sethashtable(dev
);
1866 cfg
|= MACB_BIT(NCFGR_MTI
);
1867 } else if (dev
->flags
& (~IFF_ALLMULTI
)) {
1868 /* Disable all multicast mode */
1869 macb_or_gem_writel(bp
, HRB
, 0);
1870 macb_or_gem_writel(bp
, HRT
, 0);
1871 cfg
&= ~MACB_BIT(NCFGR_MTI
);
1874 macb_writel(bp
, NCFGR
, cfg
);
1877 static int macb_open(struct net_device
*dev
)
1879 struct macb
*bp
= netdev_priv(dev
);
1880 size_t bufsz
= dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ NET_IP_ALIGN
;
1883 netdev_dbg(bp
->dev
, "open\n");
1885 /* carrier starts down */
1886 netif_carrier_off(dev
);
1888 /* if the phy is not yet register, retry later*/
1892 /* RX buffers initialization */
1893 macb_init_rx_buffer_size(bp
, bufsz
);
1895 err
= macb_alloc_consistent(bp
);
1897 netdev_err(dev
, "Unable to allocate DMA memory (error %d)\n",
1902 napi_enable(&bp
->napi
);
1904 bp
->macbgem_ops
.mog_init_rings(bp
);
1907 /* schedule a link state check */
1908 phy_start(bp
->phy_dev
);
1910 netif_tx_start_all_queues(dev
);
1915 static int macb_close(struct net_device
*dev
)
1917 struct macb
*bp
= netdev_priv(dev
);
1918 unsigned long flags
;
1920 netif_tx_stop_all_queues(dev
);
1921 napi_disable(&bp
->napi
);
1924 phy_stop(bp
->phy_dev
);
1926 spin_lock_irqsave(&bp
->lock
, flags
);
1928 netif_carrier_off(dev
);
1929 spin_unlock_irqrestore(&bp
->lock
, flags
);
1931 macb_free_consistent(bp
);
1936 static int macb_change_mtu(struct net_device
*dev
, int new_mtu
)
1938 struct macb
*bp
= netdev_priv(dev
);
1941 if (netif_running(dev
))
1944 max_mtu
= ETH_DATA_LEN
;
1945 if (bp
->caps
& MACB_CAPS_JUMBO
)
1946 max_mtu
= gem_readl(bp
, JML
) - ETH_HLEN
- ETH_FCS_LEN
;
1948 if ((new_mtu
> max_mtu
) || (new_mtu
< GEM_MTU_MIN_SIZE
))
1956 static void gem_update_stats(struct macb
*bp
)
1959 u32
*p
= &bp
->hw_stats
.gem
.tx_octets_31_0
;
1961 for (i
= 0; i
< GEM_STATS_LEN
; ++i
, ++p
) {
1962 u32 offset
= gem_statistics
[i
].offset
;
1963 u64 val
= bp
->macb_reg_readl(bp
, offset
);
1965 bp
->ethtool_stats
[i
] += val
;
1968 if (offset
== GEM_OCTTXL
|| offset
== GEM_OCTRXL
) {
1969 /* Add GEM_OCTTXH, GEM_OCTRXH */
1970 val
= bp
->macb_reg_readl(bp
, offset
+ 4);
1971 bp
->ethtool_stats
[i
] += ((u64
)val
) << 32;
1977 static struct net_device_stats
*gem_get_stats(struct macb
*bp
)
1979 struct gem_stats
*hwstat
= &bp
->hw_stats
.gem
;
1980 struct net_device_stats
*nstat
= &bp
->stats
;
1982 gem_update_stats(bp
);
1984 nstat
->rx_errors
= (hwstat
->rx_frame_check_sequence_errors
+
1985 hwstat
->rx_alignment_errors
+
1986 hwstat
->rx_resource_errors
+
1987 hwstat
->rx_overruns
+
1988 hwstat
->rx_oversize_frames
+
1989 hwstat
->rx_jabbers
+
1990 hwstat
->rx_undersized_frames
+
1991 hwstat
->rx_length_field_frame_errors
);
1992 nstat
->tx_errors
= (hwstat
->tx_late_collisions
+
1993 hwstat
->tx_excessive_collisions
+
1994 hwstat
->tx_underrun
+
1995 hwstat
->tx_carrier_sense_errors
);
1996 nstat
->multicast
= hwstat
->rx_multicast_frames
;
1997 nstat
->collisions
= (hwstat
->tx_single_collision_frames
+
1998 hwstat
->tx_multiple_collision_frames
+
1999 hwstat
->tx_excessive_collisions
);
2000 nstat
->rx_length_errors
= (hwstat
->rx_oversize_frames
+
2001 hwstat
->rx_jabbers
+
2002 hwstat
->rx_undersized_frames
+
2003 hwstat
->rx_length_field_frame_errors
);
2004 nstat
->rx_over_errors
= hwstat
->rx_resource_errors
;
2005 nstat
->rx_crc_errors
= hwstat
->rx_frame_check_sequence_errors
;
2006 nstat
->rx_frame_errors
= hwstat
->rx_alignment_errors
;
2007 nstat
->rx_fifo_errors
= hwstat
->rx_overruns
;
2008 nstat
->tx_aborted_errors
= hwstat
->tx_excessive_collisions
;
2009 nstat
->tx_carrier_errors
= hwstat
->tx_carrier_sense_errors
;
2010 nstat
->tx_fifo_errors
= hwstat
->tx_underrun
;
2015 static void gem_get_ethtool_stats(struct net_device
*dev
,
2016 struct ethtool_stats
*stats
, u64
*data
)
2020 bp
= netdev_priv(dev
);
2021 gem_update_stats(bp
);
2022 memcpy(data
, &bp
->ethtool_stats
, sizeof(u64
) * GEM_STATS_LEN
);
2025 static int gem_get_sset_count(struct net_device
*dev
, int sset
)
2029 return GEM_STATS_LEN
;
2035 static void gem_get_ethtool_strings(struct net_device
*dev
, u32 sset
, u8
*p
)
2041 for (i
= 0; i
< GEM_STATS_LEN
; i
++, p
+= ETH_GSTRING_LEN
)
2042 memcpy(p
, gem_statistics
[i
].stat_string
,
2048 static struct net_device_stats
*macb_get_stats(struct net_device
*dev
)
2050 struct macb
*bp
= netdev_priv(dev
);
2051 struct net_device_stats
*nstat
= &bp
->stats
;
2052 struct macb_stats
*hwstat
= &bp
->hw_stats
.macb
;
2054 if (macb_is_gem(bp
))
2055 return gem_get_stats(bp
);
2057 /* read stats from hardware */
2058 macb_update_stats(bp
);
2060 /* Convert HW stats into netdevice stats */
2061 nstat
->rx_errors
= (hwstat
->rx_fcs_errors
+
2062 hwstat
->rx_align_errors
+
2063 hwstat
->rx_resource_errors
+
2064 hwstat
->rx_overruns
+
2065 hwstat
->rx_oversize_pkts
+
2066 hwstat
->rx_jabbers
+
2067 hwstat
->rx_undersize_pkts
+
2068 hwstat
->rx_length_mismatch
);
2069 nstat
->tx_errors
= (hwstat
->tx_late_cols
+
2070 hwstat
->tx_excessive_cols
+
2071 hwstat
->tx_underruns
+
2072 hwstat
->tx_carrier_errors
+
2073 hwstat
->sqe_test_errors
);
2074 nstat
->collisions
= (hwstat
->tx_single_cols
+
2075 hwstat
->tx_multiple_cols
+
2076 hwstat
->tx_excessive_cols
);
2077 nstat
->rx_length_errors
= (hwstat
->rx_oversize_pkts
+
2078 hwstat
->rx_jabbers
+
2079 hwstat
->rx_undersize_pkts
+
2080 hwstat
->rx_length_mismatch
);
2081 nstat
->rx_over_errors
= hwstat
->rx_resource_errors
+
2082 hwstat
->rx_overruns
;
2083 nstat
->rx_crc_errors
= hwstat
->rx_fcs_errors
;
2084 nstat
->rx_frame_errors
= hwstat
->rx_align_errors
;
2085 nstat
->rx_fifo_errors
= hwstat
->rx_overruns
;
2086 /* XXX: What does "missed" mean? */
2087 nstat
->tx_aborted_errors
= hwstat
->tx_excessive_cols
;
2088 nstat
->tx_carrier_errors
= hwstat
->tx_carrier_errors
;
2089 nstat
->tx_fifo_errors
= hwstat
->tx_underruns
;
2090 /* Don't know about heartbeat or window errors... */
2095 static int macb_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
2097 struct macb
*bp
= netdev_priv(dev
);
2098 struct phy_device
*phydev
= bp
->phy_dev
;
2103 return phy_ethtool_gset(phydev
, cmd
);
2106 static int macb_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
2108 struct macb
*bp
= netdev_priv(dev
);
2109 struct phy_device
*phydev
= bp
->phy_dev
;
2114 return phy_ethtool_sset(phydev
, cmd
);
2117 static int macb_get_regs_len(struct net_device
*netdev
)
2119 return MACB_GREGS_NBR
* sizeof(u32
);
2122 static void macb_get_regs(struct net_device
*dev
, struct ethtool_regs
*regs
,
2125 struct macb
*bp
= netdev_priv(dev
);
2126 unsigned int tail
, head
;
2129 regs
->version
= (macb_readl(bp
, MID
) & ((1 << MACB_REV_SIZE
) - 1))
2130 | MACB_GREGS_VERSION
;
2132 tail
= macb_tx_ring_wrap(bp
->queues
[0].tx_tail
);
2133 head
= macb_tx_ring_wrap(bp
->queues
[0].tx_head
);
2135 regs_buff
[0] = macb_readl(bp
, NCR
);
2136 regs_buff
[1] = macb_or_gem_readl(bp
, NCFGR
);
2137 regs_buff
[2] = macb_readl(bp
, NSR
);
2138 regs_buff
[3] = macb_readl(bp
, TSR
);
2139 regs_buff
[4] = macb_readl(bp
, RBQP
);
2140 regs_buff
[5] = macb_readl(bp
, TBQP
);
2141 regs_buff
[6] = macb_readl(bp
, RSR
);
2142 regs_buff
[7] = macb_readl(bp
, IMR
);
2144 regs_buff
[8] = tail
;
2145 regs_buff
[9] = head
;
2146 regs_buff
[10] = macb_tx_dma(&bp
->queues
[0], tail
);
2147 regs_buff
[11] = macb_tx_dma(&bp
->queues
[0], head
);
2149 if (!(bp
->caps
& MACB_CAPS_USRIO_DISABLED
))
2150 regs_buff
[12] = macb_or_gem_readl(bp
, USRIO
);
2151 if (macb_is_gem(bp
))
2152 regs_buff
[13] = gem_readl(bp
, DMACFG
);
2155 static void macb_get_wol(struct net_device
*netdev
, struct ethtool_wolinfo
*wol
)
2157 struct macb
*bp
= netdev_priv(netdev
);
2162 if (bp
->wol
& MACB_WOL_HAS_MAGIC_PACKET
) {
2163 wol
->supported
= WAKE_MAGIC
;
2165 if (bp
->wol
& MACB_WOL_ENABLED
)
2166 wol
->wolopts
|= WAKE_MAGIC
;
2170 static int macb_set_wol(struct net_device
*netdev
, struct ethtool_wolinfo
*wol
)
2172 struct macb
*bp
= netdev_priv(netdev
);
2174 if (!(bp
->wol
& MACB_WOL_HAS_MAGIC_PACKET
) ||
2175 (wol
->wolopts
& ~WAKE_MAGIC
))
2178 if (wol
->wolopts
& WAKE_MAGIC
)
2179 bp
->wol
|= MACB_WOL_ENABLED
;
2181 bp
->wol
&= ~MACB_WOL_ENABLED
;
2183 device_set_wakeup_enable(&bp
->pdev
->dev
, bp
->wol
& MACB_WOL_ENABLED
);
2188 static const struct ethtool_ops macb_ethtool_ops
= {
2189 .get_settings
= macb_get_settings
,
2190 .set_settings
= macb_set_settings
,
2191 .get_regs_len
= macb_get_regs_len
,
2192 .get_regs
= macb_get_regs
,
2193 .get_link
= ethtool_op_get_link
,
2194 .get_ts_info
= ethtool_op_get_ts_info
,
2195 .get_wol
= macb_get_wol
,
2196 .set_wol
= macb_set_wol
,
2199 static const struct ethtool_ops gem_ethtool_ops
= {
2200 .get_settings
= macb_get_settings
,
2201 .set_settings
= macb_set_settings
,
2202 .get_regs_len
= macb_get_regs_len
,
2203 .get_regs
= macb_get_regs
,
2204 .get_link
= ethtool_op_get_link
,
2205 .get_ts_info
= ethtool_op_get_ts_info
,
2206 .get_ethtool_stats
= gem_get_ethtool_stats
,
2207 .get_strings
= gem_get_ethtool_strings
,
2208 .get_sset_count
= gem_get_sset_count
,
2211 static int macb_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
2213 struct macb
*bp
= netdev_priv(dev
);
2214 struct phy_device
*phydev
= bp
->phy_dev
;
2216 if (!netif_running(dev
))
2222 return phy_mii_ioctl(phydev
, rq
, cmd
);
2225 static int macb_set_features(struct net_device
*netdev
,
2226 netdev_features_t features
)
2228 struct macb
*bp
= netdev_priv(netdev
);
2229 netdev_features_t changed
= features
^ netdev
->features
;
2231 /* TX checksum offload */
2232 if ((changed
& NETIF_F_HW_CSUM
) && macb_is_gem(bp
)) {
2235 dmacfg
= gem_readl(bp
, DMACFG
);
2236 if (features
& NETIF_F_HW_CSUM
)
2237 dmacfg
|= GEM_BIT(TXCOEN
);
2239 dmacfg
&= ~GEM_BIT(TXCOEN
);
2240 gem_writel(bp
, DMACFG
, dmacfg
);
2243 /* RX checksum offload */
2244 if ((changed
& NETIF_F_RXCSUM
) && macb_is_gem(bp
)) {
2247 netcfg
= gem_readl(bp
, NCFGR
);
2248 if (features
& NETIF_F_RXCSUM
&&
2249 !(netdev
->flags
& IFF_PROMISC
))
2250 netcfg
|= GEM_BIT(RXCOEN
);
2252 netcfg
&= ~GEM_BIT(RXCOEN
);
2253 gem_writel(bp
, NCFGR
, netcfg
);
2259 static const struct net_device_ops macb_netdev_ops
= {
2260 .ndo_open
= macb_open
,
2261 .ndo_stop
= macb_close
,
2262 .ndo_start_xmit
= macb_start_xmit
,
2263 .ndo_set_rx_mode
= macb_set_rx_mode
,
2264 .ndo_get_stats
= macb_get_stats
,
2265 .ndo_do_ioctl
= macb_ioctl
,
2266 .ndo_validate_addr
= eth_validate_addr
,
2267 .ndo_change_mtu
= macb_change_mtu
,
2268 .ndo_set_mac_address
= eth_mac_addr
,
2269 #ifdef CONFIG_NET_POLL_CONTROLLER
2270 .ndo_poll_controller
= macb_poll_controller
,
2272 .ndo_set_features
= macb_set_features
,
2275 /* Configure peripheral capabilities according to device tree
2276 * and integration options used
2278 static void macb_configure_caps(struct macb
*bp
,
2279 const struct macb_config
*dt_conf
)
2284 bp
->caps
= dt_conf
->caps
;
2286 if (hw_is_gem(bp
->regs
, bp
->native_io
)) {
2287 bp
->caps
|= MACB_CAPS_MACB_IS_GEM
;
2289 dcfg
= gem_readl(bp
, DCFG1
);
2290 if (GEM_BFEXT(IRQCOR
, dcfg
) == 0)
2291 bp
->caps
|= MACB_CAPS_ISR_CLEAR_ON_WRITE
;
2292 dcfg
= gem_readl(bp
, DCFG2
);
2293 if ((dcfg
& (GEM_BIT(RX_PKT_BUFF
) | GEM_BIT(TX_PKT_BUFF
))) == 0)
2294 bp
->caps
|= MACB_CAPS_FIFO_MODE
;
2297 dev_dbg(&bp
->pdev
->dev
, "Cadence caps 0x%08x\n", bp
->caps
);
2300 static void macb_probe_queues(void __iomem
*mem
,
2302 unsigned int *queue_mask
,
2303 unsigned int *num_queues
)
2310 /* is it macb or gem ?
2312 * We need to read directly from the hardware here because
2313 * we are early in the probe process and don't have the
2314 * MACB_CAPS_MACB_IS_GEM flag positioned
2316 if (!hw_is_gem(mem
, native_io
))
2319 /* bit 0 is never set but queue 0 always exists */
2320 *queue_mask
= readl_relaxed(mem
+ GEM_DCFG6
) & 0xff;
2324 for (hw_q
= 1; hw_q
< MACB_MAX_QUEUES
; ++hw_q
)
2325 if (*queue_mask
& (1 << hw_q
))
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk)
{
	int err;

	*pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(*pclk)) {
		err = PTR_ERR(*pclk);
		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
		return err;
	}

	*hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(*hclk)) {
		err = PTR_ERR(*hclk);
		dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
		goto err_disable_hclk;
	}

	return 0;

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}
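/* MACB/GEM-specific init: map the per-queue registers, request one IRQ per
 * enabled queue, hook up the netdev/ethtool operations, then program the
 * USRIO and NCFGR registers according to the selected PHY interface.
 */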
static int macb_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unsigned int hw_q, q;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	int err;
	u32 val;

	/* set the queue register mapping once for all: queue0 has a special
	 * register mapping but we don't want to test the queue index then
	 * compute the corresponding register offset at run time.
	 */
	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
		if (!(bp->queue_mask & (1 << hw_q)))
			continue;

		queue = &bp->queues[q];
		queue->bp = bp;
		if (hw_q) {
			queue->ISR  = GEM_ISR(hw_q - 1);
			queue->IER  = GEM_IER(hw_q - 1);
			queue->IDR  = GEM_IDR(hw_q - 1);
			queue->IMR  = GEM_IMR(hw_q - 1);
			queue->TBQP = GEM_TBQP(hw_q - 1);
		} else {
			/* queue0 uses legacy registers */
			queue->ISR  = MACB_ISR;
			queue->IER  = MACB_IER;
			queue->IDR  = MACB_IDR;
			queue->IMR  = MACB_IMR;
			queue->TBQP = MACB_TBQP;
		}

		/* get irq: here we use the linux queue index, not the hardware
		 * queue index. the queue irq definitions in the device tree
		 * must remove the optional gaps that could exist in the
		 * hardware queue mask.
		 */
		queue->irq = platform_get_irq(pdev, q);
		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
				       IRQF_SHARED, dev->name, queue);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to request IRQ %d (error %d)\n",
				queue->irq, err);
			return err;
		}

		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
		++q;
	}

	dev->netdev_ops = &macb_netdev_ops;
	netif_napi_add(dev, &bp->napi, macb_poll, 64);

	/* setup appropriated routines according to adapter type */
	if (macb_is_gem(bp)) {
		bp->max_tx_length = GEM_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
		bp->macbgem_ops.mog_rx = gem_rx;
		dev->ethtool_ops = &gem_ethtool_ops;
	} else {
		bp->max_tx_length = MACB_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
		bp->macbgem_ops.mog_rx = macb_rx;
		dev->ethtool_ops = &macb_ethtool_ops;
	}

	/* Set features */
	dev->hw_features = NETIF_F_SG;
	/* Checksum offload is only available on gem with packet buffer */
	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	if (bp->caps & MACB_CAPS_SG_DISABLED)
		dev->hw_features &= ~NETIF_F_SG;
	dev->features = dev->hw_features;

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
		val = 0;
		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
			val = GEM_BIT(RGMII);
		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(RMII);
		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(MII);

		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
			val |= MACB_BIT(CLKEN);

		macb_or_gem_writel(bp, USRIO, val);
	}

	/* Set MII management clock divider */
	val = macb_mdc_clk_div(bp);
	val |= macb_dbw(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	macb_writel(bp, NCFGR, val);

	return 0;
}
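/* The at91rm9200 "EMAC" support below shares the register layout with MACB
 * but has no TX descriptor ring: frames are handed to the hardware one at a
 * time through the TAR/TCR registers, so it provides its own netdev ops.
 */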
#if defined(CONFIG_OF)
/* 1518 rounded up */
#define AT91ETHER_MAX_RBUFF_SZ	0x600
/* max number of receive buffers */
#define AT91ETHER_MAX_RX_DESCR	9
/* Initialize and start the Receiver and Transmit subsystems */
static int at91ether_start(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	dma_addr_t addr;
	u32 ctl;
	int i;

	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					 (AT91ETHER_MAX_RX_DESCR *
					  sizeof(struct macb_dma_desc)),
					 &lp->rx_ring_dma, GFP_KERNEL);
	if (!lp->rx_ring)
		return -ENOMEM;

	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
					    AT91ETHER_MAX_RX_DESCR *
					    AT91ETHER_MAX_RBUFF_SZ,
					    &lp->rx_buffers_dma, GFP_KERNEL);
	if (!lp->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  sizeof(struct macb_dma_desc),
				  lp->rx_ring, lp->rx_ring_dma);
		lp->rx_ring = NULL;
		return -ENOMEM;
	}

	addr = lp->rx_buffers_dma;
	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
		lp->rx_ring[i].addr = addr;
		lp->rx_ring[i].ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
	}

	/* Set the Wrap bit on the last descriptor */
	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);

	/* Reset buffer index */
	lp->rx_tail = 0;

	/* Program address of descriptor list in Rx Buffer Queue register */
	macb_writel(lp, RBQP, lp->rx_ring_dma);

	/* Enable Receive and Transmit */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));

	return 0;
}
/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;
	int ret;

	/* Clear internal statistics */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));

	macb_set_hwaddr(lp);

	ret = at91ether_start(dev);
	if (ret)
		return ret;

	/* Enable MAC interrupts */
	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	/* schedule a link state check */
	phy_start(lp->phy_dev);

	netif_start_queue(dev);

	return 0;
}
/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;

	/* Disable Receiver and Transmitter */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));

	/* Disable MAC interrupts */
	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	netif_stop_queue(dev);

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR *
			  sizeof(struct macb_dma_desc),
			  lp->rx_ring, lp->rx_ring_dma);
	lp->rx_ring = NULL;

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
			  lp->rx_buffers, lp->rx_buffers_dma);
	lp->rx_buffers = NULL;

	return 0;
}
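/* Only one frame can be in flight at a time: RM9200_BNQ set in TSR indicates
 * that no transmit buffer is currently queued, i.e. the transmitter is free
 * to accept a frame. The queue is stopped as soon as a frame is handed over
 * and woken again from the TCOMP interrupt once transmission completes.
 */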
/* Transmit packet */
static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
		netif_stop_queue(dev);

		/* Store packet information (to free when Tx completed) */
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
						  DMA_TO_DEVICE);

		/* Set address of the data in the Transmit Address register */
		macb_writel(lp, TAR, lp->skb_physaddr);
		/* Set length of the packet in the Transmit Control register */
		macb_writel(lp, TCR, skb->len);

	} else {
		netdev_err(dev, "%s called, but device is busy!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
/* Extract received frame from buffer descriptors and send to upper layers.
 * (Called from interrupt context)
 */
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, pktlen), p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			lp->stats.rx_dropped++;
		}

		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
			lp->stats.multicast++;

		/* reset ownership bit */
		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);

		/* wrap after last buffer */
		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			lp->rx_tail = 0;
		else
			lp->rx_tail++;
	}
}
/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

	/* MAC Interrupt Status register indicates what interrupts are pending.
	 * It is automatically cleared once read.
	 */
	intstatus = macb_readl(lp, ISR);

	/* Receive complete */
	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	/* Transmit complete */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOM bit is set even if the transmission failed */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			lp->stats.tx_errors++;

		if (lp->skb) {
			dev_kfree_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(NULL, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			lp->stats.tx_packets++;
			lp->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

	/* Work-around for EMAC Errata section 41.3.1 */
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif
static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk)
{
	int err;

	*hclk = NULL;
	*tx_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
		return err;
	}

	return 0;
}
static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;
	u32 reg;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

	macb_writel(bp, NCR, 0);

	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
		reg |= MACB_BIT(RM9200_RMII);

	macb_writel(bp, NCFGR, reg);

	return 0;
}
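/* Per-compatible configuration: each entry bundles the capability flags, the
 * DMA burst length and the clk_init/init hooks that macb_probe() will use.
 */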
static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config },
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */
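/* Probe flow: map the register resource, bring up the clocks through the
 * (possibly DT-overridden) clk_init hook, detect the hardware queues,
 * allocate the netdev, then run the IP-specific init hook before
 * registering the device.
 */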
static int macb_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **)
					      = macb_clk_init;
	int (*init)(struct platform_device *) = macb_init;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *phy_node;
	const struct macb_config *macb_config = NULL;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL;
	unsigned int queue_mask, num_queues;
	struct macb_platform_data *pdata;
	bool native_io;
	struct phy_device *phydev;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk);
	if (err)
		return err;

	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_disable_clocks;
	}

	mac = of_get_mac_address(np);
	if (mac)
		ether_addr_copy(bp->dev->dev_addr, mac);
	else
		macb_get_hwaddr(bp);

	/* Power up the PHY if there is a GPIO reset */
	phy_node = of_get_next_available_child(np, NULL);
	if (phy_node) {
		int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);

		if (gpio_is_valid(gpio)) {
			bp->reset_gpio = gpio_to_desc(gpio);
			gpiod_direction_output(bp->reset_gpio, 1);
		}
	}
	of_node_put(phy_node);

	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	phydev = bp->phy_dev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	phy_attached_info(phydev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	return 0;

err_out_unregister_mdio:
	phy_disconnect(bp->phy_dev);
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

	/* Shutdown the PHY if there is a GPIO reset */
	if (bp->reset_gpio)
		gpiod_set_value(bp->reset_gpio, 0);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);

	return err;
}
static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (bp->phy_dev)
			phy_disconnect(bp->phy_dev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);

		/* Shutdown the PHY if there is a GPIO reset */
		if (bp->reset_gpio)
			gpiod_set_value(bp->reset_gpio, 0);

		unregister_netdev(dev);
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		free_netdev(dev);
	}

	return 0;
}
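/* When magic-packet wake-up has been armed through ethtool, suspend leaves
 * the clocks running and enables the WOL interrupt so the MAC can wake the
 * system; otherwise the clocks are shut off. Resume mirrors this.
 */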
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
	} else {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
	}

	return 0;
}
static int __maybe_unused macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
	}

	netif_device_attach(netdev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm		= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");