/* Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "macb.h"
#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */
#define RX_RING_SIZE		512	/* must be power of 2 */
#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)

#define TX_RING_SIZE		128	/* must be power of 2 */
#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
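/* Worked derivation (illustrative): a maximum-length frame occupies
 * 1538 byte times on the wire (7 preamble + 1 SFD + 1518 frame + 12
 * inter-frame gap), and 1538 * 8 bits / 10 Mbit/s ~= 1230 us, which is
 * where the MACB_HALT_TIMEOUT value above comes from.
 */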
/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(unsigned int index)
{
	return index & (TX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	return &queue->tx_ring[macb_tx_ring_wrap(index)];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(unsigned int index)
{
	return index & (RX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	return &bp->rx_ring[macb_rx_ring_wrap(index)];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
}
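/* Note on the wrap helpers above (illustrative): because TX_RING_SIZE
 * and RX_RING_SIZE are powers of 2, "index & (SIZE - 1)" is equivalent
 * to "index % SIZE" without a division. For example, with
 * TX_RING_SIZE = 128, macb_tx_ring_wrap(130) == 130 & 127 == 2.
 */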
static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address register sets for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	netdev_info(bp->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}
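/* Sketch of the management frame assembled above (assuming the standard
 * IEEE 802.3 clause 22 layout): SOF (start of frame), RW (the read or
 * write opcode), PHYA (PHY address), REGA (register address), CODE and,
 * for writes, DATA are packed into the MAN register with MACB_BF(); the
 * hardware shifts the frame out on the MDIO pin and sets the IDLE bit
 * in NSR once the transfer has completed, which is what both helpers
 * poll for.
 */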
/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk:	Pointer to the clock to change
 * @speed:	New link speed, used to select the target frequency in Hz
 * @dev:	Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is exceeded.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}
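/* Worked example (illustrative): for SPEED_1000 the target rate is
 * 125 MHz, so rate / 100000 == 1250 and one unit of "ferr" corresponds
 * to 10 ppm. If clk_round_rate() returned 124993750 Hz, ferr would be
 * DIV_ROUND_UP(6250, 1250) == 5 units, i.e. 50 ppm, right at the RGMII
 * limit; anything larger triggers the warning above.
 */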
static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}
/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	phydev = phy_find_first(bp->mii_bus);
	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -ENXIO;
	}

	pdata = dev_get_platdata(&bp->pdev->dev);
	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
		if (!ret) {
			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
		}
	}

	/* attach the mac to the phy */
	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
				 bp->phy_interface);
	if (ret) {
		netdev_err(dev, "Could not attach to PHY\n");
		return ret;
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;
	bp->phy_dev = phydev;

	return 0;
}
static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->dev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!bp->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np) {
		/* try dt phy registration */
		err = of_mdiobus_register(bp->mii_bus, np);

		/* fall back to standard phy registration if no phy was
		 * found during dt phy registration
		 */
		if (!err && !phy_find_first(bp->mii_bus)) {
			for (i = 0; i < PHY_MAX_ADDR; i++) {
				struct phy_device *phydev;

				phydev = mdiobus_scan(bp->mii_bus, i);
				if (IS_ERR(phydev)) {
					err = PTR_ERR(phydev);
					break;
				}
			}

			if (err)
				goto err_out_unregister_bus;
		}
	} else {
		for (i = 0; i < PHY_MAX_ADDR; i++)
			bp->mii_bus->irq[i] = PHY_POLL;

		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = mdiobus_register(bp->mii_bus);
	}

	if (err)
		goto err_out_free_mdio_irq;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
	kfree(bp->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
static void macb_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + MACB_PFR;
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, reg++)
		*p += readl_relaxed(reg);
}
static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		usleep_range(10, 250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}
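/* Usage note: macb_halt_tx() polls TSR until the TGO (transmit go) bit
 * clears or MACB_HALT_TIMEOUT elapses, returning 0 on success and
 * -ETIMEDOUT otherwise. The TX error task below relies on it to make
 * sure the controller is quiescent before rewriting TBQP.
 */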
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue *queue = container_of(work, struct macb_queue,
						tx_error_task);
	struct macb *bp = queue->bp;
	struct macb_tx_skb *tx_skb;
	struct macb_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int tail;
	unsigned long flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(tail), skb->data);
				bp->stats.tx_packets++;
				bp->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about those.
			 * Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	desc->addr = 0;
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, queue->tx_ring_dma);
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}
static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(tail), skb->data);
				bp->stats.tx_packets++;
				bp->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
		netif_wake_subqueue(bp->dev, queue_index);
}
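/* Worked example (illustrative): with TX_RING_SIZE = 128 the wake
 * threshold is 3 * 128 / 4 = 96, so a stopped subqueue is only woken
 * once CIRC_CNT(tx_head, tx_tail, 128), the number of descriptors still
 * in flight, has dropped to 96 or fewer.
 */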
static void gem_rx_refill(struct macb *bp)
{
	unsigned int entry;
	struct sk_buff *skb;
	dma_addr_t paddr;

	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
		entry = macb_rx_ring_wrap(bp->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		bp->rx_prepared_head++;

		if (bp->rx_skbuff[entry] == NULL) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(skb == NULL)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size, DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			bp->rx_skbuff[entry] = skb;

			if (entry == RX_RING_SIZE - 1)
				paddr |= MACB_BIT(RX_WRAP);
			bp->rx_ring[entry].addr = paddr;
			bp->rx_ring[entry].ctrl = 0;

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
			bp->rx_ring[entry].ctrl = 0;
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
		    bp->rx_prepared_head, bp->rx_tail);
}
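/* Note (illustrative): CIRC_SPACE(head, tail, size) from
 * <linux/circ_buf.h> returns how many entries the producer may still
 * write before catching up with the consumer. E.g. with
 * rx_prepared_head = 510, rx_tail = 2 and RX_RING_SIZE = 512 it yields
 * 3, so the refill loop above prepares at most three fresh buffers.
 */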
/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this is updated, so we don't have to record
	 * anything.
	 */
}
static int gem_rx(struct macb *bp, int budget)
{
	unsigned int len;
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	int count = 0;

	while (count < budget) {
		u32 addr, ctrl;

		entry = macb_rx_ring_wrap(bp->rx_tail);
		desc = &bp->rx_ring[entry];

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		bp->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->stats.rx_dropped++;
			break;
		}
		skb = bp->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		bp->rx_skbuff[entry] = NULL;
		len = MACB_BFEXT(RX_FRMLEN, ctrl);

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->stats.rx_packets++;
		bp->stats.rx_bytes += skb->len;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(bp);

	return count;
}
static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	desc = macb_rx_desc(bp, last_frag);
	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(first_frag),
		    macb_rx_ring_wrap(last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			BUG_ON(frag != last_frag);
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(bp, frag), frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->stats.rx_packets++;
	bp->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}
static int macb_rx(struct macb *bp, int budget)
{
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 addr, ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;
			BUG_ON(first_frag == -1);

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}
static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	work_done = 0;

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(bp, budget);
	if (work_done < budget) {
		napi_complete(napi);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&bp->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			/* TODO: Reset the hardware, and maybe move the
			 * netdev_err to a lower-priority context as well
			 * (work queue?)
			 */
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}

		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif
static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
						     unsigned int len)
{
	return (len + bp->max_tx_length - 1) / bp->max_tx_length;
}
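/* Worked example (illustrative): this is a ceiling division. Assuming
 * GEM_TX_FRMLEN_SIZE is 14, GEM_MAX_TX_LEN is 16383 bytes, so a
 * 20000-byte buffer needs (20000 + 16382) / 16383 = 2 descriptors.
 */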
static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1;
	u32 ctrl;

	/* First, map non-paged data */
	len = skb_headlen(skb);
	offset = 0;
	while (len) {
		size = min(len, bp->max_tx_length);
		entry = macb_tx_ring_wrap(tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(tx_skb == NULL)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(i);
	ctrl = MACB_BIT(TX_USED);
	desc = &queue->tx_ring[entry];
	desc->ctrl = ctrl;

	do {
		i--;
		entry = macb_tx_ring_wrap(i);
		tx_skb = &queue->tx_skb[entry];
		desc = &queue->tx_ring[entry];

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (TX_RING_SIZE - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* Set TX buffer descriptor */
		desc->addr = tx_skb->mapping;
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int count, nr_frags, frag_size, f;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	count = macb_count_tx_descriptors(bp, skb_headlen(skb));
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		count += macb_count_tx_descriptors(bp, frag_size);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();

	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}
static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
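/* Worked example (illustrative): for a standard 1500-byte MTU the
 * requested size is 1500 + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN =
 * 1500 + 14 + 4 + 2 = 1520 bytes (assuming NET_IP_ALIGN == 2), which
 * roundup() expands to the next RX_BUFFER_MULTIPLE boundary, 1536.
 */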
static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	dma_addr_t addr;
	int i;

	if (!bp->rx_skbuff)
		return;

	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = bp->rx_skbuff[i];

		if (skb == NULL)
			continue;

		desc = &bp->rx_ring[i];
		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = NULL;
	}

	kfree(bp->rx_skbuff);
	bp->rx_skbuff = NULL;
}

static void macb_free_rx_buffers(struct macb *bp)
{
	if (bp->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  RX_RING_SIZE * bp->rx_buffer_size,
				  bp->rx_buffers, bp->rx_buffers_dma);
		bp->rx_buffers = NULL;
	}
}
static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	bp->macbgem_ops.mog_free_rx_buffers(bp);
	if (bp->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
				  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
	}
}
static int gem_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = RX_RING_SIZE * sizeof(struct sk_buff *);
	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_skbuff)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated %d RX struct sk_buff entries at %p\n",
		   RX_RING_SIZE, bp->rx_skbuff);
	return 0;
}

static int macb_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = RX_RING_SIZE * bp->rx_buffer_size;
	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &bp->rx_buffers_dma, GFP_KERNEL);
	if (!bp->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
	return 0;
}
static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES;
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;
	}

	size = RX_RING_BYTES;
	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->rx_ring_dma, GFP_KERNEL);
	if (!bp->rx_ring)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}
static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < TX_RING_SIZE; i++) {
			queue->tx_ring[i].addr = 0;
			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
		}
		queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;
	}

	bp->rx_tail = 0;
	bp->rx_prepared_head = 0;

	gem_rx_refill(bp);
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	dma_addr_t addr;

	addr = bp->rx_buffers_dma;
	for (i = 0; i < RX_RING_SIZE; i++) {
		bp->rx_ring[i].addr = addr;
		bp->rx_ring[i].ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);

	for (i = 0; i < TX_RING_SIZE; i++) {
		bp->queues[0].tx_ring[i].addr = 0;
		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
	bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);

	bp->rx_tail = 0;
}
static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	/* Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
		queue_readl(queue, ISR);
	}
}
static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}
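/* Worked example (illustrative, assuming the usual 2.5 MHz MDC limit
 * from IEEE 802.3): a GEM clocked from a 133 MHz pclk falls into the
 * "<= 160000000" bucket above and gets GEM_CLK_DIV64, for an MDC of
 * roughly 133 MHz / 64 ~= 2.08 MHz, safely below the limit.
 */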
/* Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}
/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	u32 dmacfg;
	u32 tmp, ncr;

	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		/* Find the CPU endianness by using the loopback bit of net_ctrl
		 * register. save it first. When the CPU is in big endian we
		 * need to program swapped mode for management descriptor access.
		 */
		ncr = macb_readl(bp, NCR);
		__raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
		tmp = __raw_readl(bp->regs + MACB_NCR);

		if (tmp == MACB_BIT(LLB))
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		/* Restore net_ctrl */
		macb_writel(bp, NCR, ncr);

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}
static void macb_init_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	config |= MACB_BIT(BIG);		/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	bp->speed = SPEED_10;
	bp->duplex = DUPLEX_HALF;

	macb_configure_dma(bp);

	/* Initialize TX and RX buffers */
	macb_writel(bp, RBQP, bp->rx_ring_dma);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, TBQP, queue->tx_ring_dma);

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
}
/* The hash address register is 64 bits long and takes up two
 * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function. The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received. If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast. A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register. A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register. To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */

static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}
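/* Worked example (illustrative): for the broadcast address
 * ff:ff:ff:ff:ff:ff every da[] bit is 1, so each hi[n] XORs eight ones
 * and evaluates to 0; hash_get_index() therefore returns 0 and the
 * broadcast address maps to bit 0 of the 64-bit hash register.
 */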
/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}

/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);

		/* Disable RX checksum offload */
		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

		/* Enable RX checksum offload only if requested */
		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Enable all multicast mode */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Enable specific multicasts */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable all multicast mode */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}
static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	int err;

	netdev_dbg(bp->dev, "open\n");

	/* carrier starts down */
	netif_carrier_off(dev);

	/* if the phy is not yet registered, retry later */
	if (!bp->phy_dev)
		return -EAGAIN;

	/* RX buffers initialization */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		return err;
	}

	napi_enable(&bp->napi);

	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);

	/* schedule a link state check */
	phy_start(bp->phy_dev);

	netif_tx_start_all_queues(dev);

	return 0;
}

static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	netif_tx_stop_all_queues(dev);
	napi_disable(&bp->napi);

	if (bp->phy_dev)
		phy_stop(bp->phy_dev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	return 0;
}
static void gem_update_stats(struct macb *bp)
{
	int i;
	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = readl_relaxed(bp->regs + offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			/* Add GEM_OCTTXH, GEM_OCTRXH */
			val = readl_relaxed(bp->regs + offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}
}
static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}
static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct macb *bp;

	bp = netdev_priv(dev);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
}

static int gem_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return GEM_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
	int i;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
			memcpy(p, gem_statistics[i].stat_string,
			       ETH_GSTRING_LEN);
		break;
	}
}
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors +
			    hwstat->sqe_test_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}
static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}

static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}

static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}
static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
	head = macb_tx_ring_wrap(bp->queues[0].tx_head);

	regs_buff[0]  = macb_readl(bp, NCR);
	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2]  = macb_readl(bp, NSR);
	regs_buff[3]  = macb_readl(bp, TSR);
	regs_buff[4]  = macb_readl(bp, RBQP);
	regs_buff[5]  = macb_readl(bp, TBQP);
	regs_buff[6]  = macb_readl(bp, RSR);
	regs_buff[7]  = macb_readl(bp, IMR);

	regs_buff[8]  = tail;
	regs_buff[9]  = head;
	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);

	regs_buff[12] = macb_or_gem_readl(bp, USRIO);
	if (macb_is_gem(bp)) {
		regs_buff[13] = gem_readl(bp, DMACFG);
	}
}
static const struct ethtool_ops macb_ethtool_ops = {
	.get_settings		= macb_get_settings,
	.set_settings		= macb_set_settings,
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
};

static const struct ethtool_ops gem_ethtool_ops = {
	.get_settings		= macb_get_settings,
	.set_settings		= macb_set_settings,
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static int macb_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct macb *bp = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* TX checksum offload */
	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
		u32 dmacfg;

		dmacfg = gem_readl(bp, DMACFG);
		if (features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);
		gem_writel(bp, DMACFG, dmacfg);
	}

	/* RX checksum offload */
	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
		u32 netcfg;

		netcfg = gem_readl(bp, NCFGR);
		if (features & NETIF_F_RXCSUM &&
		    !(netdev->flags & IFF_PROMISC))
			netcfg |= GEM_BIT(RXCOEN);
		else
			netcfg &= ~GEM_BIT(RXCOEN);
		gem_writel(bp, NCFGR, netcfg);
	}

	return 0;
}
static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
	.ndo_set_features	= macb_set_features,
};
/* Configure peripheral capabilities according to device tree
 * and integration options used
 */
static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
{
	u32 dcfg;

	if (dt_conf)
		bp->caps = dt_conf->caps;

	if (macb_is_gem_hw(bp->regs)) {
		bp->caps |= MACB_CAPS_MACB_IS_GEM;

		dcfg = gem_readl(bp, DCFG1);
		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
		dcfg = gem_readl(bp, DCFG2);
		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
			bp->caps |= MACB_CAPS_FIFO_MODE;
	}

	netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
}
static void macb_probe_queues(void __iomem *mem,
			      unsigned int *queue_mask,
			      unsigned int *num_queues)
{
	unsigned int hw_q;

	*queue_mask = 0x1;
	*num_queues = 1;

	/* is it macb or gem ?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and don't have the
	 * MACB_CAPS_MACB_IS_GEM flag positioned
	 */
	if (!macb_is_gem_hw(mem))
		return;

	/* bit 0 is never set but queue 0 always exists */
	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;

	*queue_mask |= 0x1;

	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
		if (*queue_mask & (1 << hw_q))
			(*num_queues)++;
}
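/* Worked example (illustrative): if DCFG6 reads 0x06, the queue mask
 * becomes 0x06 | 0x01 = 0x07 and the loop above counts hardware queues
 * 1 and 2 on top of the always-present queue 0, leaving *num_queues
 * at 3.
 */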
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk)
{
	int err;

	*pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(*pclk)) {
		err = PTR_ERR(*pclk);
		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
		return err;
	}

	*hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(*hclk)) {
		err = PTR_ERR(*hclk);
		dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
		goto err_disable_hclk;
	}

	return 0;

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}
2239 static int macb_init(struct platform_device
*pdev
)
2241 struct net_device
*dev
= platform_get_drvdata(pdev
);
2242 unsigned int hw_q
, q
;
2243 struct macb
*bp
= netdev_priv(dev
);
2244 struct macb_queue
*queue
;
2248 /* set the queue register mapping once for all: queue0 has a special
2249 * register mapping but we don't want to test the queue index then
2250 * compute the corresponding register offset at run time.
2252 for (hw_q
= 0, q
= 0; hw_q
< MACB_MAX_QUEUES
; ++hw_q
) {
2253 if (!(bp
->queue_mask
& (1 << hw_q
)))
2256 queue
= &bp
->queues
[q
];
2259 queue
->ISR
= GEM_ISR(hw_q
- 1);
2260 queue
->IER
= GEM_IER(hw_q
- 1);
2261 queue
->IDR
= GEM_IDR(hw_q
- 1);
2262 queue
->IMR
= GEM_IMR(hw_q
- 1);
2263 queue
->TBQP
= GEM_TBQP(hw_q
- 1);
2265 /* queue0 uses legacy registers */
2266 queue
->ISR
= MACB_ISR
;
2267 queue
->IER
= MACB_IER
;
2268 queue
->IDR
= MACB_IDR
;
2269 queue
->IMR
= MACB_IMR
;
2270 queue
->TBQP
= MACB_TBQP
;
2273 /* get irq: here we use the linux queue index, not the hardware
2274 * queue index. the queue irq definitions in the device tree
2275 * must remove the optional gaps that could exist in the
2276 * hardware queue mask.
2278 queue
->irq
= platform_get_irq(pdev
, q
);
2279 err
= devm_request_irq(&pdev
->dev
, queue
->irq
, macb_interrupt
,
2280 IRQF_SHARED
, dev
->name
, queue
);
2283 "Unable to request IRQ %d (error %d)\n",
2288 INIT_WORK(&queue
->tx_error_task
, macb_tx_error_task
);
2292 dev
->netdev_ops
= &macb_netdev_ops
;
2293 netif_napi_add(dev
, &bp
->napi
, macb_poll
, 64);
2295 /* setup appropriated routines according to adapter type */
2296 if (macb_is_gem(bp
)) {
2297 bp
->max_tx_length
= GEM_MAX_TX_LEN
;
2298 bp
->macbgem_ops
.mog_alloc_rx_buffers
= gem_alloc_rx_buffers
;
2299 bp
->macbgem_ops
.mog_free_rx_buffers
= gem_free_rx_buffers
;
2300 bp
->macbgem_ops
.mog_init_rings
= gem_init_rings
;
2301 bp
->macbgem_ops
.mog_rx
= gem_rx
;
2302 dev
->ethtool_ops
= &gem_ethtool_ops
;
2304 bp
->max_tx_length
= MACB_MAX_TX_LEN
;
2305 bp
->macbgem_ops
.mog_alloc_rx_buffers
= macb_alloc_rx_buffers
;
2306 bp
->macbgem_ops
.mog_free_rx_buffers
= macb_free_rx_buffers
;
2307 bp
->macbgem_ops
.mog_init_rings
= macb_init_rings
;
2308 bp
->macbgem_ops
.mog_rx
= macb_rx
;
2309 dev
->ethtool_ops
= &macb_ethtool_ops
;
2313 dev
->hw_features
= NETIF_F_SG
;
2314 /* Checksum offload is only available on gem with packet buffer */
2315 if (macb_is_gem(bp
) && !(bp
->caps
& MACB_CAPS_FIFO_MODE
))
2316 dev
->hw_features
|= NETIF_F_HW_CSUM
| NETIF_F_RXCSUM
;
2317 if (bp
->caps
& MACB_CAPS_SG_DISABLED
)
2318 dev
->hw_features
&= ~NETIF_F_SG
;
2319 dev
->features
= dev
->hw_features
;
	val = 0;
	if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
		val = GEM_BIT(RGMII);
	else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
		 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
		val = MACB_BIT(RMII);
	else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
		val = MACB_BIT(MII);

	if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
		val |= MACB_BIT(CLKEN);

	macb_or_gem_writel(bp, USRIO, val);

	/* Set MII management clock divider */
	val = macb_mdc_clk_div(bp);
	val |= macb_dbw(bp);
	macb_writel(bp, NCFGR, val);

	return 0;
}
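
/* Everything below supports the older EMAC core (e.g. AT91RM9200): it has
 * no transmit DMA ring, so frames go out one at a time through TAR/TCR,
 * and receive uses a small fixed ring of AT91ETHER_MAX_RX_DESCR buffers.
 */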
#if defined(CONFIG_OF)
/* 1518 rounded up */
#define AT91ETHER_MAX_RBUFF_SZ	0x600
/* max number of receive buffers */
#define AT91ETHER_MAX_RX_DESCR	9
/* Initialize and start the Receiver and Transmit subsystems */
static int at91ether_start(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	dma_addr_t addr;
	u32 ctl;
	int i;

	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					 (AT91ETHER_MAX_RX_DESCR *
					  sizeof(struct macb_dma_desc)),
					 &lp->rx_ring_dma, GFP_KERNEL);
	if (!lp->rx_ring)
		return -ENOMEM;

	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
					    AT91ETHER_MAX_RX_DESCR *
					    AT91ETHER_MAX_RBUFF_SZ,
					    &lp->rx_buffers_dma, GFP_KERNEL);
	if (!lp->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  sizeof(struct macb_dma_desc),
				  lp->rx_ring, lp->rx_ring_dma);
		lp->rx_ring = NULL;
		return -ENOMEM;
	}

	addr = lp->rx_buffers_dma;
	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
		lp->rx_ring[i].addr = addr;
		lp->rx_ring[i].ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
	}

	/* Set the Wrap bit on the last descriptor */
	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);

	/* Reset buffer index */
	lp->rx_tail = 0;

	/* Program address of descriptor list in Rx Buffer Queue register */
	macb_writel(lp, RBQP, lp->rx_ring_dma);

	/* Enable Receive and Transmit */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));

	return 0;
}
/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;
	int ret;

	/* Clear internal statistics */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));

	macb_set_hwaddr(lp);

	ret = at91ether_start(dev);
	if (ret)
		return ret;

	/* Enable MAC interrupts */
	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	/* schedule a link state check */
	phy_start(lp->phy_dev);

	netif_start_queue(dev);

	return 0;
}
/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;

	/* Disable Receiver and Transmitter */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));

	/* Disable MAC interrupts */
	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	netif_stop_queue(dev);

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR *
			  sizeof(struct macb_dma_desc),
			  lp->rx_ring, lp->rx_ring_dma);
	lp->rx_ring = NULL;

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
			  lp->rx_buffers, lp->rx_buffers_dma);
	lp->rx_buffers = NULL;

	return 0;
}
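
/* The EMAC can only have one frame queued for transmission at a time:
 * TSR's RM9200_BNQ bit reports whether the transmit buffer is free, and
 * the queue stays stopped until the TCOMP interrupt signals completion.
 */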
/* Transmit packet */
static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
		netif_stop_queue(dev);

		/* Store packet information (to free when Tx completed) */
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
						  DMA_TO_DEVICE);

		/* Set address of the data in the Transmit Address register */
		macb_writel(lp, TAR, lp->skb_physaddr);
		/* Set length of the packet in the Transmit Control register */
		macb_writel(lp, TCR, skb->len);
	} else {
		netdev_err(dev, "%s called, but device is busy!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
/* Extract received frame from buffer descriptors and send to upper layers.
 * (Called from interrupt context)
 */
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, pktlen), p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			lp->stats.rx_dropped++;
		}

		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
			lp->stats.multicast++;

		/* reset ownership bit */
		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);

		/* wrap after last buffer */
		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			lp->rx_tail = 0;
		else
			lp->rx_tail++;
	}
}
/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

	/* MAC Interrupt Status register indicates what interrupts are pending.
	 * It is automatically cleared once read.
	 */
	intstatus = macb_readl(lp, ISR);

	/* Receive complete */
	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	/* Transmit complete */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOM bit is set even if the transmission failed */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			lp->stats.tx_errors++;

		if (lp->skb) {
			dev_kfree_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(NULL, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			lp->stats.tx_packets++;
			lp->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

	/* Work-around for EMAC Errata section 41.3.1 */
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif
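
/* The EMAC reuses the generic macb helpers (stats, rx mode, ioctl) and
 * overrides open/stop/xmit with the single-frame variants above.
 */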
static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};
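
/* The EMAC only has a single clock, "ether_clk"; hclk and tx_clk are
 * returned as NULL so the shared unwind paths in macb_probe() can call
 * clk_disable_unprepare() unconditionally (a NULL clk is a no-op).
 */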
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk)
{
	int err;

	*hclk = NULL;
	*tx_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
		return err;
	}

	return 0;
}
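
/* IP-specific init for the EMAC: install the netdev ops above, reset the
 * control register, and program an initial NCFGR (clock divider and
 * related config bits), enabling RMII mode when the PHY interface
 * requires it.
 */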
static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;
	u32 reg;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

	macb_writel(bp, NCR, 0);

	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
		reg |= MACB_BIT(RM9200_RMII);

	macb_writel(bp, NCFGR, reg);

	return 0;
}
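
/* Per-SoC configuration data: capability flags, DMA burst length and the
 * clk_init/init hooks consumed by macb_probe(). Entries are selected via
 * the OF match table below.
 */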
static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d4_config = {
	.caps = 0,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */
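
/* Probe resolves the per-SoC config (OF match data when available),
 * enables the clocks, maps the registers, sizes the netdev by the number
 * of hardware queues found, then defers to the IP-specific init hook.
 */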
static int macb_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **)
					      = macb_clk_init;
	int (*init)(struct platform_device *) = macb_init;
	struct device_node *np = pdev->dev.of_node;
	const struct macb_config *macb_config = NULL;
	struct clk *pclk, *hclk, *tx_clk;
	unsigned int queue_mask, num_queues;
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err;

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk);
	if (err)
		return err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem)) {
		err = PTR_ERR(mem);
		goto err_disable_clocks;
	}

	macb_probe_queues(mem, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_disable_clocks;
	}

	mac = of_get_mac_address(np);
	if (mac)
		memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
	else
		macb_get_hwaddr(bp);

	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_netdev;
	}

	err = macb_mii_init(bp);
	if (err)
		goto err_out_unregister_netdev;

	netif_carrier_off(dev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	phydev = bp->phy_dev;
	netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		    phydev->drv->name, dev_name(&phydev->dev), phydev->irq);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);

	return err;
}
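
/* Teardown mirrors probe in reverse: detach the PHY and MDIO bus, then
 * unregister the netdev before the clocks are released.
 */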
static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (bp->phy_dev)
			phy_disconnect(bp->phy_dev);
		mdiobus_unregister(bp->mii_bus);
		kfree(bp->mii_bus->irq);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		free_netdev(dev);
	}

	return 0;
}
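
/* Suspend simply detaches the interface and gates the clocks; resume
 * re-enables them in the reverse order before reattaching the netdev.
 */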
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

	clk_disable_unprepare(bp->tx_clk);
	clk_disable_unprepare(bp->hclk);
	clk_disable_unprepare(bp->pclk);

	return 0;
}
static int __maybe_unused macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	clk_prepare_enable(bp->pclk);
	clk_prepare_enable(bp->hclk);
	clk_prepare_enable(bp->tx_clk);

	netif_device_attach(netdev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm		= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");