/*
 * Blackfin On-Chip MAC Driver
 *
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */
11 #define DRV_VERSION "1.1"
12 #define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/irq.h>
26 #include <linux/ioport.h>
27 #include <linux/crc32.h>
28 #include <linux/device.h>
29 #include <linux/spinlock.h>
30 #include <linux/mii.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/ethtool.h>
34 #include <linux/skbuff.h>
35 #include <linux/platform_device.h>
38 #include <linux/dma-mapping.h>
40 #include <asm/div64.h>
42 #include <asm/blackfin.h>
43 #include <asm/cacheflush.h>
44 #include <asm/portmux.h>
49 MODULE_AUTHOR("Bryan Wu, Luke Yang");
50 MODULE_LICENSE("GPL");
51 MODULE_DESCRIPTION(DRV_DESC
);
52 MODULE_ALIAS("platform:bfin_mac");
54 #if defined(CONFIG_BFIN_MAC_USE_L1)
55 # define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
56 # define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
58 # define bfin_mac_alloc(dma_handle, size, num) \
59 dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
60 # define bfin_mac_free(dma_handle, ptr, num) \
61 dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
64 #define PKT_BUF_SZ 1580
66 #define MAX_TIMEOUT_CNT 500
68 /* pointers to maintain transmit list */
69 static struct net_dma_desc_tx
*tx_list_head
;
70 static struct net_dma_desc_tx
*tx_list_tail
;
71 static struct net_dma_desc_rx
*rx_list_head
;
72 static struct net_dma_desc_rx
*rx_list_tail
;
73 static struct net_dma_desc_rx
*current_rx_ptr
;
74 static struct net_dma_desc_tx
*current_tx_ptr
;
75 static struct net_dma_desc_tx
*tx_desc
;
76 static struct net_dma_desc_rx
*rx_desc
;
78 static void desc_list_free(void)
80 struct net_dma_desc_rx
*r
;
81 struct net_dma_desc_tx
*t
;
83 #if !defined(CONFIG_BFIN_MAC_USE_L1)
84 dma_addr_t dma_handle
= 0;
89 for (i
= 0; i
< CONFIG_BFIN_TX_DESC_NUM
; i
++) {
92 dev_kfree_skb(t
->skb
);
98 bfin_mac_free(dma_handle
, tx_desc
, CONFIG_BFIN_TX_DESC_NUM
);
103 for (i
= 0; i
< CONFIG_BFIN_RX_DESC_NUM
; i
++) {
106 dev_kfree_skb(r
->skb
);
112 bfin_mac_free(dma_handle
, rx_desc
, CONFIG_BFIN_RX_DESC_NUM
);
116 static int desc_list_init(struct net_device
*dev
)
119 struct sk_buff
*new_skb
;
120 #if !defined(CONFIG_BFIN_MAC_USE_L1)
122 * This dma_handle is useless in Blackfin dma_alloc_coherent().
123 * The real dma handler is the return value of dma_alloc_coherent().
125 dma_addr_t dma_handle
;
128 tx_desc
= bfin_mac_alloc(&dma_handle
,
129 sizeof(struct net_dma_desc_tx
),
130 CONFIG_BFIN_TX_DESC_NUM
);
134 rx_desc
= bfin_mac_alloc(&dma_handle
,
135 sizeof(struct net_dma_desc_rx
),
136 CONFIG_BFIN_RX_DESC_NUM
);
141 tx_list_head
= tx_list_tail
= tx_desc
;
143 for (i
= 0; i
< CONFIG_BFIN_TX_DESC_NUM
; i
++) {
144 struct net_dma_desc_tx
*t
= tx_desc
+ i
;
145 struct dma_descriptor
*a
= &(t
->desc_a
);
146 struct dma_descriptor
*b
= &(t
->desc_b
);
150 * read from memory WNR = 0
151 * wordsize is 32 bits
152 * 6 half words is desc size
155 a
->config
= WDSIZE_32
| NDSIZE_6
| DMAFLOW_LARGE
;
156 a
->start_addr
= (unsigned long)t
->packet
;
158 a
->next_dma_desc
= b
;
162 * write to memory WNR = 1
163 * wordsize is 32 bits
165 * 6 half words is desc size
168 b
->config
= DMAEN
| WNR
| WDSIZE_32
| NDSIZE_6
| DMAFLOW_LARGE
;
169 b
->start_addr
= (unsigned long)(&(t
->status
));
173 tx_list_tail
->desc_b
.next_dma_desc
= a
;
174 tx_list_tail
->next
= t
;
177 tx_list_tail
->next
= tx_list_head
; /* tx_list is a circle */
178 tx_list_tail
->desc_b
.next_dma_desc
= &(tx_list_head
->desc_a
);
179 current_tx_ptr
= tx_list_head
;
182 rx_list_head
= rx_list_tail
= rx_desc
;
184 for (i
= 0; i
< CONFIG_BFIN_RX_DESC_NUM
; i
++) {
185 struct net_dma_desc_rx
*r
= rx_desc
+ i
;
186 struct dma_descriptor
*a
= &(r
->desc_a
);
187 struct dma_descriptor
*b
= &(r
->desc_b
);
189 /* allocate a new skb for next time receive */
190 new_skb
= netdev_alloc_skb(dev
, PKT_BUF_SZ
+ NET_IP_ALIGN
);
194 skb_reserve(new_skb
, NET_IP_ALIGN
);
195 /* Invidate the data cache of skb->data range when it is write back
196 * cache. It will prevent overwritting the new data from DMA
198 blackfin_dcache_invalidate_range((unsigned long)new_skb
->head
,
199 (unsigned long)new_skb
->end
);
204 * write to memory WNR = 1
205 * wordsize is 32 bits
207 * 6 half words is desc size
210 a
->config
= DMAEN
| WNR
| WDSIZE_32
| NDSIZE_6
| DMAFLOW_LARGE
;
211 /* since RXDWA is enabled */
212 a
->start_addr
= (unsigned long)new_skb
->data
- 2;
214 a
->next_dma_desc
= b
;
218 * write to memory WNR = 1
219 * wordsize is 32 bits
221 * 6 half words is desc size
224 b
->config
= DMAEN
| WNR
| WDSIZE_32
| DI_EN
|
225 NDSIZE_6
| DMAFLOW_LARGE
;
226 b
->start_addr
= (unsigned long)(&(r
->status
));
229 rx_list_tail
->desc_b
.next_dma_desc
= a
;
230 rx_list_tail
->next
= r
;
233 rx_list_tail
->next
= rx_list_head
; /* rx_list is a circle */
234 rx_list_tail
->desc_b
.next_dma_desc
= &(rx_list_head
->desc_a
);
235 current_rx_ptr
= rx_list_head
;
241 pr_err("kmalloc failed\n");
246 /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
251 /* Wait until the previous MDC/MDIO transaction has completed */
252 static int bfin_mdio_poll(void)
254 int timeout_cnt
= MAX_TIMEOUT_CNT
;
256 /* poll the STABUSY bit */
257 while ((bfin_read_EMAC_STAADD()) & STABUSY
) {
259 if (timeout_cnt
-- < 0) {
260 pr_err("wait MDC/MDIO transaction to complete timeout\n");
268 /* Read an off-chip register in a PHY through the MDC/MDIO port */
269 static int bfin_mdiobus_read(struct mii_bus
*bus
, int phy_addr
, int regnum
)
273 ret
= bfin_mdio_poll();
278 bfin_write_EMAC_STAADD(SET_PHYAD((u16
) phy_addr
) |
279 SET_REGAD((u16
) regnum
) |
282 ret
= bfin_mdio_poll();
286 return (int) bfin_read_EMAC_STADAT();
289 /* Write an off-chip register in a PHY through the MDC/MDIO port */
290 static int bfin_mdiobus_write(struct mii_bus
*bus
, int phy_addr
, int regnum
,
295 ret
= bfin_mdio_poll();
299 bfin_write_EMAC_STADAT((u32
) value
);
302 bfin_write_EMAC_STAADD(SET_PHYAD((u16
) phy_addr
) |
303 SET_REGAD((u16
) regnum
) |
307 return bfin_mdio_poll();
310 static void bfin_mac_adjust_link(struct net_device
*dev
)
312 struct bfin_mac_local
*lp
= netdev_priv(dev
);
313 struct phy_device
*phydev
= lp
->phydev
;
317 spin_lock_irqsave(&lp
->lock
, flags
);
319 /* Now we make sure that we can be in full duplex mode.
320 * If not, we operate in half-duplex mode. */
321 if (phydev
->duplex
!= lp
->old_duplex
) {
322 u32 opmode
= bfin_read_EMAC_OPMODE();
330 bfin_write_EMAC_OPMODE(opmode
);
331 lp
->old_duplex
= phydev
->duplex
;
334 if (phydev
->speed
!= lp
->old_speed
) {
335 if (phydev
->interface
== PHY_INTERFACE_MODE_RMII
) {
336 u32 opmode
= bfin_read_EMAC_OPMODE();
337 switch (phydev
->speed
) {
346 "Ack! Speed (%d) is not 10/100!\n",
350 bfin_write_EMAC_OPMODE(opmode
);
354 lp
->old_speed
= phydev
->speed
;
361 } else if (lp
->old_link
) {
369 u32 opmode
= bfin_read_EMAC_OPMODE();
370 phy_print_status(phydev
);
371 pr_debug("EMAC_OPMODE = 0x%08x\n", opmode
);
374 spin_unlock_irqrestore(&lp
->lock
, flags
);
378 #define MDC_CLK 2500000
380 static int mii_probe(struct net_device
*dev
, int phy_mode
)
382 struct bfin_mac_local
*lp
= netdev_priv(dev
);
383 struct phy_device
*phydev
= NULL
;
384 unsigned short sysctl
;
388 /* Enable PHY output early */
389 if (!(bfin_read_VR_CTL() & CLKBUFOE
))
390 bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE
);
393 mdc_div
= ((sclk
/ MDC_CLK
) / 2) - 1;
395 sysctl
= bfin_read_EMAC_SYSCTL();
396 sysctl
= (sysctl
& ~MDCDIV
) | SET_MDCDIV(mdc_div
);
397 bfin_write_EMAC_SYSCTL(sysctl
);
399 /* search for connected PHY device */
400 for (i
= 0; i
< PHY_MAX_ADDR
; ++i
) {
401 struct phy_device
*const tmp_phydev
= lp
->mii_bus
->phy_map
[i
];
404 continue; /* no PHY here... */
407 break; /* found it */
410 /* now we are supposed to have a proper phydev, to attach to... */
412 netdev_err(dev
, "no phy device found\n");
416 if (phy_mode
!= PHY_INTERFACE_MODE_RMII
&&
417 phy_mode
!= PHY_INTERFACE_MODE_MII
) {
418 netdev_err(dev
, "invalid phy interface mode\n");
422 phydev
= phy_connect(dev
, dev_name(&phydev
->dev
),
423 &bfin_mac_adjust_link
, phy_mode
);
425 if (IS_ERR(phydev
)) {
426 netdev_err(dev
, "could not attach PHY\n");
427 return PTR_ERR(phydev
);
430 /* mask with MAC supported features */
431 phydev
->supported
&= (SUPPORTED_10baseT_Half
432 | SUPPORTED_10baseT_Full
433 | SUPPORTED_100baseT_Half
434 | SUPPORTED_100baseT_Full
436 | SUPPORTED_Pause
| SUPPORTED_Asym_Pause
440 phydev
->advertising
= phydev
->supported
;
447 pr_info("attached PHY driver [%s] "
448 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
449 phydev
->drv
->name
, dev_name(&phydev
->dev
), phydev
->irq
,
450 MDC_CLK
, mdc_div
, sclk
/1000000);
460 * interrupt routine for magic packet wakeup
462 static irqreturn_t
bfin_mac_wake_interrupt(int irq
, void *dev_id
)
468 bfin_mac_ethtool_getsettings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
470 struct bfin_mac_local
*lp
= netdev_priv(dev
);
473 return phy_ethtool_gset(lp
->phydev
, cmd
);
479 bfin_mac_ethtool_setsettings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
481 struct bfin_mac_local
*lp
= netdev_priv(dev
);
483 if (!capable(CAP_NET_ADMIN
))
487 return phy_ethtool_sset(lp
->phydev
, cmd
);
492 static void bfin_mac_ethtool_getdrvinfo(struct net_device
*dev
,
493 struct ethtool_drvinfo
*info
)
495 strlcpy(info
->driver
, KBUILD_MODNAME
, sizeof(info
->driver
));
496 strlcpy(info
->version
, DRV_VERSION
, sizeof(info
->version
));
497 strlcpy(info
->fw_version
, "N/A", sizeof(info
->fw_version
));
498 strlcpy(info
->bus_info
, dev_name(&dev
->dev
), sizeof(info
->bus_info
));
501 static void bfin_mac_ethtool_getwol(struct net_device
*dev
,
502 struct ethtool_wolinfo
*wolinfo
)
504 struct bfin_mac_local
*lp
= netdev_priv(dev
);
506 wolinfo
->supported
= WAKE_MAGIC
;
507 wolinfo
->wolopts
= lp
->wol
;
510 static int bfin_mac_ethtool_setwol(struct net_device
*dev
,
511 struct ethtool_wolinfo
*wolinfo
)
513 struct bfin_mac_local
*lp
= netdev_priv(dev
);
516 if (wolinfo
->wolopts
& (WAKE_MAGICSECURE
|
523 lp
->wol
= wolinfo
->wolopts
;
525 if (lp
->wol
&& !lp
->irq_wake_requested
) {
526 /* register wake irq handler */
527 rc
= request_irq(IRQ_MAC_WAKEDET
, bfin_mac_wake_interrupt
,
528 0, "EMAC_WAKE", dev
);
531 lp
->irq_wake_requested
= true;
534 if (!lp
->wol
&& lp
->irq_wake_requested
) {
535 free_irq(IRQ_MAC_WAKEDET
, dev
);
536 lp
->irq_wake_requested
= false;
539 /* Make sure the PHY driver doesn't suspend */
540 device_init_wakeup(&dev
->dev
, lp
->wol
);
545 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
546 static int bfin_mac_ethtool_get_ts_info(struct net_device
*dev
,
547 struct ethtool_ts_info
*info
)
549 struct bfin_mac_local
*lp
= netdev_priv(dev
);
551 info
->so_timestamping
=
552 SOF_TIMESTAMPING_TX_HARDWARE
|
553 SOF_TIMESTAMPING_RX_HARDWARE
|
554 SOF_TIMESTAMPING_RAW_HARDWARE
;
555 info
->phc_index
= lp
->phc_index
;
557 (1 << HWTSTAMP_TX_OFF
) |
558 (1 << HWTSTAMP_TX_ON
);
560 (1 << HWTSTAMP_FILTER_NONE
) |
561 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT
) |
562 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT
) |
563 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT
);
568 static const struct ethtool_ops bfin_mac_ethtool_ops
= {
569 .get_settings
= bfin_mac_ethtool_getsettings
,
570 .set_settings
= bfin_mac_ethtool_setsettings
,
571 .get_link
= ethtool_op_get_link
,
572 .get_drvinfo
= bfin_mac_ethtool_getdrvinfo
,
573 .get_wol
= bfin_mac_ethtool_getwol
,
574 .set_wol
= bfin_mac_ethtool_setwol
,
575 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
576 .get_ts_info
= bfin_mac_ethtool_get_ts_info
,
580 /**************************************************************************/
581 static void setup_system_regs(struct net_device
*dev
)
583 struct bfin_mac_local
*lp
= netdev_priv(dev
);
585 unsigned short sysctl
;
588 * Odd word alignment for Receive Frame DMA word
589 * Configure checksum support and rcve frame word alignment
591 sysctl
= bfin_read_EMAC_SYSCTL();
593 * check if interrupt is requested for any PHY,
594 * enable PHY interrupt only if needed
596 for (i
= 0; i
< PHY_MAX_ADDR
; ++i
)
597 if (lp
->mii_bus
->irq
[i
] != PHY_POLL
)
599 if (i
< PHY_MAX_ADDR
)
602 #if defined(BFIN_MAC_CSUM_OFFLOAD)
607 bfin_write_EMAC_SYSCTL(sysctl
);
609 bfin_write_EMAC_MMC_CTL(RSTC
| CROLL
);
611 /* Set vlan regs to let 1522 bytes long packets pass through */
612 bfin_write_EMAC_VLAN1(lp
->vlan1_mask
);
613 bfin_write_EMAC_VLAN2(lp
->vlan2_mask
);
615 /* Initialize the TX DMA channel registers */
616 bfin_write_DMA2_X_COUNT(0);
617 bfin_write_DMA2_X_MODIFY(4);
618 bfin_write_DMA2_Y_COUNT(0);
619 bfin_write_DMA2_Y_MODIFY(0);
621 /* Initialize the RX DMA channel registers */
622 bfin_write_DMA1_X_COUNT(0);
623 bfin_write_DMA1_X_MODIFY(4);
624 bfin_write_DMA1_Y_COUNT(0);
625 bfin_write_DMA1_Y_MODIFY(0);
628 static void setup_mac_addr(u8
*mac_addr
)
630 u32 addr_low
= le32_to_cpu(*(__le32
*) & mac_addr
[0]);
631 u16 addr_hi
= le16_to_cpu(*(__le16
*) & mac_addr
[4]);
633 /* this depends on a little-endian machine */
634 bfin_write_EMAC_ADDRLO(addr_low
);
635 bfin_write_EMAC_ADDRHI(addr_hi
);
638 static int bfin_mac_set_mac_address(struct net_device
*dev
, void *p
)
640 struct sockaddr
*addr
= p
;
641 if (netif_running(dev
))
643 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
644 setup_mac_addr(dev
->dev_addr
);
648 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
649 #define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
651 static u32
bfin_select_phc_clock(u32 input_clk
, unsigned int *shift_result
)
653 u32 ipn
= 1000000000UL / input_clk
;
655 unsigned int shift
= 0;
661 *shift_result
= shift
;
662 return 1000000000UL / ppn
;
665 static int bfin_mac_hwtstamp_set(struct net_device
*netdev
,
668 struct hwtstamp_config config
;
669 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
671 u32 ptpfv1
, ptpfv2
, ptpfv3
, ptpfoff
;
673 if (copy_from_user(&config
, ifr
->ifr_data
, sizeof(config
)))
676 pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
677 __func__
, config
.flags
, config
.tx_type
, config
.rx_filter
);
679 /* reserved for future extensions */
683 if ((config
.tx_type
!= HWTSTAMP_TX_OFF
) &&
684 (config
.tx_type
!= HWTSTAMP_TX_ON
))
687 ptpctl
= bfin_read_EMAC_PTP_CTL();
689 switch (config
.rx_filter
) {
690 case HWTSTAMP_FILTER_NONE
:
692 * Dont allow any timestamping
695 bfin_write_EMAC_PTP_FV3(ptpfv3
);
697 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
698 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
699 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
701 * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL)
702 * to enable all the field matches.
705 bfin_write_EMAC_PTP_CTL(ptpctl
);
707 * Keep the default values of the EMAC_PTP_FOFF register.
709 ptpfoff
= 0x4A24170C;
710 bfin_write_EMAC_PTP_FOFF(ptpfoff
);
712 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
716 bfin_write_EMAC_PTP_FV1(ptpfv1
);
718 bfin_write_EMAC_PTP_FV2(ptpfv2
);
720 * The default value (0xFFFC) allows the timestamping of both
721 * received Sync messages and Delay_Req messages.
724 bfin_write_EMAC_PTP_FV3(ptpfv3
);
726 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V1_L4_EVENT
;
728 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
729 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
730 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
731 /* Clear all five comparison mask bits (bits[12:8]) in the
732 * EMAC_PTP_CTL register to enable all the field matches.
735 bfin_write_EMAC_PTP_CTL(ptpctl
);
737 * Keep the default values of the EMAC_PTP_FOFF register, except set
738 * the PTPCOF field to 0x2A.
740 ptpfoff
= 0x2A24170C;
741 bfin_write_EMAC_PTP_FOFF(ptpfoff
);
743 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
747 bfin_write_EMAC_PTP_FV1(ptpfv1
);
749 bfin_write_EMAC_PTP_FV2(ptpfv2
);
751 * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
752 * the value to 0xFFF0.
755 bfin_write_EMAC_PTP_FV3(ptpfv3
);
757 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_L4_EVENT
;
759 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
760 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
761 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
763 * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
764 * EFTM and PTPCM field comparison.
767 bfin_write_EMAC_PTP_CTL(ptpctl
);
769 * Keep the default values of all the fields of the EMAC_PTP_FOFF
770 * register, except set the PTPCOF field to 0x0E.
772 ptpfoff
= 0x0E24170C;
773 bfin_write_EMAC_PTP_FOFF(ptpfoff
);
775 * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
776 * corresponds to PTP messages on the MAC layer.
779 bfin_write_EMAC_PTP_FV1(ptpfv1
);
781 bfin_write_EMAC_PTP_FV2(ptpfv2
);
783 * To allow the timestamping of Pdelay_Req and Pdelay_Resp
784 * messages, set the value to 0xFFF0.
787 bfin_write_EMAC_PTP_FV3(ptpfv3
);
789 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_L2_EVENT
;
795 if (config
.tx_type
== HWTSTAMP_TX_OFF
&&
796 bfin_mac_hwtstamp_is_none(config
.rx_filter
)) {
798 bfin_write_EMAC_PTP_CTL(ptpctl
);
803 bfin_write_EMAC_PTP_CTL(ptpctl
);
806 * clear any existing timestamp
808 bfin_read_EMAC_PTP_RXSNAPLO();
809 bfin_read_EMAC_PTP_RXSNAPHI();
811 bfin_read_EMAC_PTP_TXSNAPLO();
812 bfin_read_EMAC_PTP_TXSNAPHI();
817 lp
->stamp_cfg
= config
;
818 return copy_to_user(ifr
->ifr_data
, &config
, sizeof(config
)) ?
822 static int bfin_mac_hwtstamp_get(struct net_device
*netdev
,
825 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
827 return copy_to_user(ifr
->ifr_data
, &lp
->stamp_cfg
,
828 sizeof(lp
->stamp_cfg
)) ?
832 static void bfin_tx_hwtstamp(struct net_device
*netdev
, struct sk_buff
*skb
)
834 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
836 if (skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) {
837 int timeout_cnt
= MAX_TIMEOUT_CNT
;
839 /* When doing time stamping, keep the connection to the socket
842 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
845 * The timestamping is done at the EMAC module's MII/RMII interface
846 * when the module sees the Start of Frame of an event message packet. This
847 * interface is the closest possible place to the physical Ethernet transmission
848 * medium, providing the best timing accuracy.
850 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL
)) && (--timeout_cnt
))
852 if (timeout_cnt
== 0)
853 netdev_err(netdev
, "timestamp the TX packet failed\n");
855 struct skb_shared_hwtstamps shhwtstamps
;
859 regval
= bfin_read_EMAC_PTP_TXSNAPLO();
860 regval
|= (u64
)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
861 memset(&shhwtstamps
, 0, sizeof(shhwtstamps
));
862 ns
= regval
<< lp
->shift
;
863 shhwtstamps
.hwtstamp
= ns_to_ktime(ns
);
864 skb_tstamp_tx(skb
, &shhwtstamps
);
869 static void bfin_rx_hwtstamp(struct net_device
*netdev
, struct sk_buff
*skb
)
871 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
874 struct skb_shared_hwtstamps
*shhwtstamps
;
876 if (bfin_mac_hwtstamp_is_none(lp
->stamp_cfg
.rx_filter
))
879 valid
= bfin_read_EMAC_PTP_ISTAT() & RXEL
;
883 shhwtstamps
= skb_hwtstamps(skb
);
885 regval
= bfin_read_EMAC_PTP_RXSNAPLO();
886 regval
|= (u64
)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
887 ns
= regval
<< lp
->shift
;
888 memset(shhwtstamps
, 0, sizeof(*shhwtstamps
));
889 shhwtstamps
->hwtstamp
= ns_to_ktime(ns
);
892 static void bfin_mac_hwtstamp_init(struct net_device
*netdev
)
894 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
896 u32 input_clk
, phc_clk
;
898 /* Initialize hardware timer */
899 input_clk
= get_sclk();
900 phc_clk
= bfin_select_phc_clock(input_clk
, &lp
->shift
);
901 addend
= phc_clk
* (1ULL << 32);
902 do_div(addend
, input_clk
);
903 bfin_write_EMAC_PTP_ADDEND((u32
)addend
);
906 ppb
= 1000000000ULL * input_clk
;
907 do_div(ppb
, phc_clk
);
908 lp
->max_ppb
= ppb
- 1000000000ULL - 1ULL;
910 /* Initialize hwstamp config */
911 lp
->stamp_cfg
.rx_filter
= HWTSTAMP_FILTER_NONE
;
912 lp
->stamp_cfg
.tx_type
= HWTSTAMP_TX_OFF
;
915 static u64
bfin_ptp_time_read(struct bfin_mac_local
*lp
)
920 lo
= bfin_read_EMAC_PTP_TIMELO();
921 hi
= bfin_read_EMAC_PTP_TIMEHI();
923 ns
= ((u64
) hi
) << 32;
930 static void bfin_ptp_time_write(struct bfin_mac_local
*lp
, u64 ns
)
936 lo
= ns
& 0xffffffff;
938 bfin_write_EMAC_PTP_TIMELO(lo
);
939 bfin_write_EMAC_PTP_TIMEHI(hi
);
942 /* PTP Hardware Clock operations */
944 static int bfin_ptp_adjfreq(struct ptp_clock_info
*ptp
, s32 ppb
)
949 struct bfin_mac_local
*lp
=
950 container_of(ptp
, struct bfin_mac_local
, caps
);
959 diff
= div_u64(adj
, 1000000000ULL);
961 addend
= neg_adj
? addend
- diff
: addend
+ diff
;
963 bfin_write_EMAC_PTP_ADDEND(addend
);
968 static int bfin_ptp_adjtime(struct ptp_clock_info
*ptp
, s64 delta
)
972 struct bfin_mac_local
*lp
=
973 container_of(ptp
, struct bfin_mac_local
, caps
);
975 spin_lock_irqsave(&lp
->phc_lock
, flags
);
977 now
= bfin_ptp_time_read(lp
);
979 bfin_ptp_time_write(lp
, now
);
981 spin_unlock_irqrestore(&lp
->phc_lock
, flags
);
986 static int bfin_ptp_gettime(struct ptp_clock_info
*ptp
, struct timespec64
*ts
)
990 struct bfin_mac_local
*lp
=
991 container_of(ptp
, struct bfin_mac_local
, caps
);
993 spin_lock_irqsave(&lp
->phc_lock
, flags
);
995 ns
= bfin_ptp_time_read(lp
);
997 spin_unlock_irqrestore(&lp
->phc_lock
, flags
);
999 *ts
= ns_to_timespec64(ns
);
1004 static int bfin_ptp_settime(struct ptp_clock_info
*ptp
,
1005 const struct timespec64
*ts
)
1008 unsigned long flags
;
1009 struct bfin_mac_local
*lp
=
1010 container_of(ptp
, struct bfin_mac_local
, caps
);
1012 ns
= timespec64_to_ns(ts
);
1014 spin_lock_irqsave(&lp
->phc_lock
, flags
);
1016 bfin_ptp_time_write(lp
, ns
);
1018 spin_unlock_irqrestore(&lp
->phc_lock
, flags
);
1023 static int bfin_ptp_enable(struct ptp_clock_info
*ptp
,
1024 struct ptp_clock_request
*rq
, int on
)
1029 static struct ptp_clock_info bfin_ptp_caps
= {
1030 .owner
= THIS_MODULE
,
1031 .name
= "BF518 clock",
1038 .adjfreq
= bfin_ptp_adjfreq
,
1039 .adjtime
= bfin_ptp_adjtime
,
1040 .gettime64
= bfin_ptp_gettime
,
1041 .settime64
= bfin_ptp_settime
,
1042 .enable
= bfin_ptp_enable
,
1045 static int bfin_phc_init(struct net_device
*netdev
, struct device
*dev
)
1047 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
1049 lp
->caps
= bfin_ptp_caps
;
1050 lp
->caps
.max_adj
= lp
->max_ppb
;
1051 lp
->clock
= ptp_clock_register(&lp
->caps
, dev
);
1052 if (IS_ERR(lp
->clock
))
1053 return PTR_ERR(lp
->clock
);
1055 lp
->phc_index
= ptp_clock_index(lp
->clock
);
1056 spin_lock_init(&lp
->phc_lock
);
1061 static void bfin_phc_release(struct bfin_mac_local
*lp
)
1063 ptp_clock_unregister(lp
->clock
);
1067 # define bfin_mac_hwtstamp_is_none(cfg) 0
1068 # define bfin_mac_hwtstamp_init(dev)
1069 # define bfin_mac_hwtstamp_set(dev, ifr) (-EOPNOTSUPP)
1070 # define bfin_mac_hwtstamp_get(dev, ifr) (-EOPNOTSUPP)
1071 # define bfin_rx_hwtstamp(dev, skb)
1072 # define bfin_tx_hwtstamp(dev, skb)
1073 # define bfin_phc_init(netdev, dev) 0
1074 # define bfin_phc_release(lp)
1077 static inline void _tx_reclaim_skb(void)
1080 tx_list_head
->desc_a
.config
&= ~DMAEN
;
1081 tx_list_head
->status
.status_word
= 0;
1082 if (tx_list_head
->skb
) {
1083 dev_consume_skb_any(tx_list_head
->skb
);
1084 tx_list_head
->skb
= NULL
;
1086 tx_list_head
= tx_list_head
->next
;
1088 } while (tx_list_head
->status
.status_word
!= 0);
1091 static void tx_reclaim_skb(struct bfin_mac_local
*lp
)
1093 int timeout_cnt
= MAX_TIMEOUT_CNT
;
1095 if (tx_list_head
->status
.status_word
!= 0)
1098 if (current_tx_ptr
->next
== tx_list_head
) {
1099 while (tx_list_head
->status
.status_word
== 0) {
1100 /* slow down polling to avoid too many queue stop. */
1102 /* reclaim skb if DMA is not running. */
1103 if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN
))
1105 if (timeout_cnt
-- < 0)
1109 if (timeout_cnt
>= 0)
1112 netif_stop_queue(lp
->ndev
);
1115 if (current_tx_ptr
->next
!= tx_list_head
&&
1116 netif_queue_stopped(lp
->ndev
))
1117 netif_wake_queue(lp
->ndev
);
1119 if (tx_list_head
!= current_tx_ptr
) {
1120 /* shorten the timer interval if tx queue is stopped */
1121 if (netif_queue_stopped(lp
->ndev
))
1122 lp
->tx_reclaim_timer
.expires
=
1123 jiffies
+ (TX_RECLAIM_JIFFIES
>> 4);
1125 lp
->tx_reclaim_timer
.expires
=
1126 jiffies
+ TX_RECLAIM_JIFFIES
;
1128 mod_timer(&lp
->tx_reclaim_timer
,
1129 lp
->tx_reclaim_timer
.expires
);
/* Timer callback: periodic tx reclaim; 'lp' carries the device context */
static void tx_reclaim_skb_timeout(unsigned long lp)
{
	tx_reclaim_skb((struct bfin_mac_local *)lp);
}
1140 static int bfin_mac_hard_start_xmit(struct sk_buff
*skb
,
1141 struct net_device
*dev
)
1143 struct bfin_mac_local
*lp
= netdev_priv(dev
);
1145 u32 data_align
= (unsigned long)(skb
->data
) & 0x3;
1147 current_tx_ptr
->skb
= skb
;
1149 if (data_align
== 0x2) {
1150 /* move skb->data to current_tx_ptr payload */
1151 data
= (u16
*)(skb
->data
) - 1;
1152 *data
= (u16
)(skb
->len
);
1154 * When transmitting an Ethernet packet, the PTP_TSYNC module requires
1155 * a DMA_Length_Word field associated with the packet. The lower 12 bits
1156 * of this field are the length of the packet payload in bytes and the higher
1157 * 4 bits are the timestamping enable field.
1159 if (skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
)
1162 current_tx_ptr
->desc_a
.start_addr
= (u32
)data
;
1163 /* this is important! */
1164 blackfin_dcache_flush_range((u32
)data
,
1165 (u32
)((u8
*)data
+ skb
->len
+ 4));
1167 *((u16
*)(current_tx_ptr
->packet
)) = (u16
)(skb
->len
);
1168 /* enable timestamping for the sent packet */
1169 if (skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
)
1170 *((u16
*)(current_tx_ptr
->packet
)) |= 0x1000;
1171 memcpy((u8
*)(current_tx_ptr
->packet
+ 2), skb
->data
,
1173 current_tx_ptr
->desc_a
.start_addr
=
1174 (u32
)current_tx_ptr
->packet
;
1175 blackfin_dcache_flush_range(
1176 (u32
)current_tx_ptr
->packet
,
1177 (u32
)(current_tx_ptr
->packet
+ skb
->len
+ 2));
1180 /* make sure the internal data buffers in the core are drained
1181 * so that the DMA descriptors are completely written when the
1182 * DMA engine goes to fetch them below
1186 /* always clear status buffer before start tx dma */
1187 current_tx_ptr
->status
.status_word
= 0;
1189 /* enable this packet's dma */
1190 current_tx_ptr
->desc_a
.config
|= DMAEN
;
1192 /* tx dma is running, just return */
1193 if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN
)
1196 /* tx dma is not running */
1197 bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr
->desc_a
));
1198 /* dma enabled, read from memory, size is 6 */
1199 bfin_write_DMA2_CONFIG(current_tx_ptr
->desc_a
.config
);
1200 /* Turn on the EMAC tx */
1201 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE
);
1204 bfin_tx_hwtstamp(dev
, skb
);
1206 current_tx_ptr
= current_tx_ptr
->next
;
1207 dev
->stats
.tx_packets
++;
1208 dev
->stats
.tx_bytes
+= (skb
->len
);
1212 return NETDEV_TX_OK
;
1215 #define IP_HEADER_OFF 0
1216 #define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
1217 RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
1219 static void bfin_mac_rx(struct bfin_mac_local
*lp
)
1221 struct net_device
*dev
= lp
->ndev
;
1222 struct sk_buff
*skb
, *new_skb
;
1224 #if defined(BFIN_MAC_CSUM_OFFLOAD)
1226 unsigned char fcs
[ETH_FCS_LEN
+ 1];
1229 /* check if frame status word reports an error condition
1230 * we which case we simply drop the packet
1232 if (current_rx_ptr
->status
.status_word
& RX_ERROR_MASK
) {
1233 netdev_notice(dev
, "rx: receive error - packet dropped\n");
1234 dev
->stats
.rx_dropped
++;
1238 /* allocate a new skb for next time receive */
1239 skb
= current_rx_ptr
->skb
;
1241 new_skb
= netdev_alloc_skb(dev
, PKT_BUF_SZ
+ NET_IP_ALIGN
);
1243 dev
->stats
.rx_dropped
++;
1246 /* reserve 2 bytes for RXDWA padding */
1247 skb_reserve(new_skb
, NET_IP_ALIGN
);
1248 /* Invidate the data cache of skb->data range when it is write back
1249 * cache. It will prevent overwritting the new data from DMA
1251 blackfin_dcache_invalidate_range((unsigned long)new_skb
->head
,
1252 (unsigned long)new_skb
->end
);
1254 current_rx_ptr
->skb
= new_skb
;
1255 current_rx_ptr
->desc_a
.start_addr
= (unsigned long)new_skb
->data
- 2;
1257 len
= (unsigned short)(current_rx_ptr
->status
.status_word
& RX_FRLEN
);
1258 /* Deduce Ethernet FCS length from Ethernet payload length */
1262 skb
->protocol
= eth_type_trans(skb
, dev
);
1264 bfin_rx_hwtstamp(dev
, skb
);
1266 #if defined(BFIN_MAC_CSUM_OFFLOAD)
1267 /* Checksum offloading only works for IPv4 packets with the standard IP header
1268 * length of 20 bytes, because the blackfin MAC checksum calculation is
1269 * based on that assumption. We must NOT use the calculated checksum if our
1270 * IP version or header break that assumption.
1272 if (skb
->data
[IP_HEADER_OFF
] == 0x45) {
1273 skb
->csum
= current_rx_ptr
->status
.ip_payload_csum
;
1275 * Deduce Ethernet FCS from hardware generated IP payload checksum.
1276 * IP checksum is based on 16-bit one's complement algorithm.
1277 * To deduce a value from checksum is equal to add its inversion.
1278 * If the IP payload len is odd, the inversed FCS should also
1279 * begin from odd address and leave first byte zero.
1283 for (i
= 0; i
< ETH_FCS_LEN
; i
++)
1284 fcs
[i
+ 1] = ~skb
->data
[skb
->len
+ i
];
1285 skb
->csum
= csum_partial(fcs
, ETH_FCS_LEN
+ 1, skb
->csum
);
1287 for (i
= 0; i
< ETH_FCS_LEN
; i
++)
1288 fcs
[i
] = ~skb
->data
[skb
->len
+ i
];
1289 skb
->csum
= csum_partial(fcs
, ETH_FCS_LEN
, skb
->csum
);
1291 skb
->ip_summed
= CHECKSUM_COMPLETE
;
1295 napi_gro_receive(&lp
->napi
, skb
);
1297 dev
->stats
.rx_packets
++;
1298 dev
->stats
.rx_bytes
+= len
;
1300 current_rx_ptr
->status
.status_word
= 0x00000000;
1301 current_rx_ptr
= current_rx_ptr
->next
;
1304 static int bfin_mac_poll(struct napi_struct
*napi
, int budget
)
1307 struct bfin_mac_local
*lp
= container_of(napi
,
1308 struct bfin_mac_local
,
1311 while (current_rx_ptr
->status
.status_word
!= 0 && i
< budget
) {
1317 napi_complete(napi
);
1318 if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED
, &lp
->flags
))
1319 enable_irq(IRQ_MAC_RX
);
1325 /* interrupt routine to handle rx and error signal */
1326 static irqreturn_t
bfin_mac_interrupt(int irq
, void *dev_id
)
1328 struct bfin_mac_local
*lp
= netdev_priv(dev_id
);
1331 status
= bfin_read_DMA1_IRQ_STATUS();
1333 bfin_write_DMA1_IRQ_STATUS(status
| DMA_DONE
| DMA_ERR
);
1334 if (status
& DMA_DONE
) {
1335 disable_irq_nosync(IRQ_MAC_RX
);
1336 set_bit(BFIN_MAC_RX_IRQ_DISABLED
, &lp
->flags
);
1337 napi_schedule(&lp
->napi
);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the rx interrupt handler and reclaim tx by hand */
static void bfin_mac_poll_controller(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	bfin_mac_interrupt(IRQ_MAC_RX, dev);
	tx_reclaim_skb(lp);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
1353 static void bfin_mac_disable(void)
1355 unsigned int opmode
;
1357 opmode
= bfin_read_EMAC_OPMODE();
1360 /* Turn off the EMAC */
1361 bfin_write_EMAC_OPMODE(opmode
);
1365 * Enable Interrupts, Receive, and Transmit
1367 static int bfin_mac_enable(struct phy_device
*phydev
)
1372 pr_debug("%s\n", __func__
);
1375 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head
->desc_a
));
1376 bfin_write_DMA1_CONFIG(rx_list_head
->desc_a
.config
);
1379 ret
= bfin_mdio_poll();
1383 /* We enable only RX here */
1384 /* ASTP : Enable Automatic Pad Stripping
1385 PR : Promiscuous Mode for test
1386 PSF : Receive frames with total length less than 64 bytes.
1387 FDMODE : Full Duplex Mode
1388 LB : Internal Loopback for test
1389 RE : Receiver Enable */
1390 opmode
= bfin_read_EMAC_OPMODE();
1391 if (opmode
& FDMODE
)
1394 opmode
|= DRO
| DC
| PSF
;
1397 if (phydev
->interface
== PHY_INTERFACE_MODE_RMII
) {
1398 opmode
|= RMII
; /* For Now only 100MBit are supported */
1399 #if defined(CONFIG_BF537) || defined(CONFIG_BF536)
1400 if (__SILICON_REVISION__
< 3) {
1402 * This isn't publicly documented (fun times!), but in
1403 * silicon <=0.2, the RX and TX pins are clocked together.
1404 * So in order to recv, we must enable the transmit side
1405 * as well. This will cause a spurious TX interrupt too,
1406 * but we can easily consume that.
1413 /* Turn on the EMAC rx */
1414 bfin_write_EMAC_OPMODE(opmode
);
1419 /* Our watchdog timed out. Called by the networking layer */
1420 static void bfin_mac_timeout(struct net_device
*dev
)
1422 struct bfin_mac_local
*lp
= netdev_priv(dev
);
1424 pr_debug("%s: %s\n", dev
->name
, __func__
);
1428 del_timer(&lp
->tx_reclaim_timer
);
1430 /* reset tx queue and free skb */
1431 while (tx_list_head
!= current_tx_ptr
) {
1432 tx_list_head
->desc_a
.config
&= ~DMAEN
;
1433 tx_list_head
->status
.status_word
= 0;
1434 if (tx_list_head
->skb
) {
1435 dev_kfree_skb(tx_list_head
->skb
);
1436 tx_list_head
->skb
= NULL
;
1438 tx_list_head
= tx_list_head
->next
;
1441 if (netif_queue_stopped(dev
))
1442 netif_wake_queue(dev
);
1444 bfin_mac_enable(lp
->phydev
);
1446 /* We can accept TX packets again */
1447 dev
->trans_start
= jiffies
; /* prevent tx timeout */
1450 static void bfin_mac_multicast_hash(struct net_device
*dev
)
1452 u32 emac_hashhi
, emac_hashlo
;
1453 struct netdev_hw_addr
*ha
;
1456 emac_hashhi
= emac_hashlo
= 0;
1458 netdev_for_each_mc_addr(ha
, dev
) {
1459 crc
= ether_crc(ETH_ALEN
, ha
->addr
);
1463 emac_hashhi
|= 1 << (crc
& 0x1f);
1465 emac_hashlo
|= 1 << (crc
& 0x1f);
1468 bfin_write_EMAC_HASHHI(emac_hashhi
);
1469 bfin_write_EMAC_HASHLO(emac_hashlo
);
1473 * This routine will, depending on the values passed to it,
1474 * either make it accept multicast packets, go into
1475 * promiscuous mode (for TCPDUMP and cousins) or accept
1476 * a select set of multicast packets
1478 static void bfin_mac_set_multicast_list(struct net_device
*dev
)
1482 if (dev
->flags
& IFF_PROMISC
) {
1483 netdev_info(dev
, "set promisc mode\n");
1484 sysctl
= bfin_read_EMAC_OPMODE();
1486 bfin_write_EMAC_OPMODE(sysctl
);
1487 } else if (dev
->flags
& IFF_ALLMULTI
) {
1488 /* accept all multicast */
1489 sysctl
= bfin_read_EMAC_OPMODE();
1491 bfin_write_EMAC_OPMODE(sysctl
);
1492 } else if (!netdev_mc_empty(dev
)) {
1493 /* set up multicast hash table */
1494 sysctl
= bfin_read_EMAC_OPMODE();
1496 bfin_write_EMAC_OPMODE(sysctl
);
1497 bfin_mac_multicast_hash(dev
);
1499 /* clear promisc or multicast mode */
1500 sysctl
= bfin_read_EMAC_OPMODE();
1501 sysctl
&= ~(RAF
| PAM
);
1502 bfin_write_EMAC_OPMODE(sysctl
);
1506 static int bfin_mac_ioctl(struct net_device
*netdev
, struct ifreq
*ifr
, int cmd
)
1508 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
1510 if (!netif_running(netdev
))
1515 return bfin_mac_hwtstamp_set(netdev
, ifr
);
1517 return bfin_mac_hwtstamp_get(netdev
, ifr
);
1520 return phy_mii_ioctl(lp
->phydev
, ifr
, cmd
);
/*
 * this puts the device in an inactive state
 */
static void bfin_mac_shutdown(struct net_device *dev)
{
	/* Turn off the EMAC */
	bfin_write_EMAC_OPMODE(0x00000000);
	/* Turn off the EMAC RX DMA */
	bfin_write_DMA1_CONFIG(0x0000);
	bfin_write_DMA2_CONFIG(0x0000);
}
1539 * Open and Initialize the interface
1541 * Set up everything, reset the card, etc..
1543 static int bfin_mac_open(struct net_device
*dev
)
1545 struct bfin_mac_local
*lp
= netdev_priv(dev
);
1547 pr_debug("%s: %s\n", dev
->name
, __func__
);
1550 * Check that the address is valid. If its not, refuse
1551 * to bring the device up. The user must specify an
1552 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1554 if (!is_valid_ether_addr(dev
->dev_addr
)) {
1555 netdev_warn(dev
, "no valid ethernet hw addr\n");
1559 /* initial rx and tx list */
1560 ret
= desc_list_init(dev
);
1564 phy_start(lp
->phydev
);
1565 setup_system_regs(dev
);
1566 setup_mac_addr(dev
->dev_addr
);
1569 ret
= bfin_mac_enable(lp
->phydev
);
1572 pr_debug("hardware init finished\n");
1574 napi_enable(&lp
->napi
);
1575 netif_start_queue(dev
);
1576 netif_carrier_on(dev
);
1582 * this makes the board clean up everything that it can
1583 * and not talk to the outside world. Caused by
1584 * an 'ifconfig ethX down'
1586 static int bfin_mac_close(struct net_device
*dev
)
1588 struct bfin_mac_local
*lp
= netdev_priv(dev
);
1589 pr_debug("%s: %s\n", dev
->name
, __func__
);
1591 netif_stop_queue(dev
);
1592 napi_disable(&lp
->napi
);
1593 netif_carrier_off(dev
);
1595 phy_stop(lp
->phydev
);
1596 phy_write(lp
->phydev
, MII_BMCR
, BMCR_PDOWN
);
1598 /* clear everything */
1599 bfin_mac_shutdown(dev
);
1601 /* free the rx/tx buffers */
1607 static const struct net_device_ops bfin_mac_netdev_ops
= {
1608 .ndo_open
= bfin_mac_open
,
1609 .ndo_stop
= bfin_mac_close
,
1610 .ndo_start_xmit
= bfin_mac_hard_start_xmit
,
1611 .ndo_set_mac_address
= bfin_mac_set_mac_address
,
1612 .ndo_tx_timeout
= bfin_mac_timeout
,
1613 .ndo_set_rx_mode
= bfin_mac_set_multicast_list
,
1614 .ndo_do_ioctl
= bfin_mac_ioctl
,
1615 .ndo_validate_addr
= eth_validate_addr
,
1616 .ndo_change_mtu
= eth_change_mtu
,
1617 #ifdef CONFIG_NET_POLL_CONTROLLER
1618 .ndo_poll_controller
= bfin_mac_poll_controller
,
1622 static int bfin_mac_probe(struct platform_device
*pdev
)
1624 struct net_device
*ndev
;
1625 struct bfin_mac_local
*lp
;
1626 struct platform_device
*pd
;
1627 struct bfin_mii_bus_platform_data
*mii_bus_data
;
1630 ndev
= alloc_etherdev(sizeof(struct bfin_mac_local
));
1634 SET_NETDEV_DEV(ndev
, &pdev
->dev
);
1635 platform_set_drvdata(pdev
, ndev
);
1636 lp
= netdev_priv(ndev
);
1639 /* Grab the MAC address in the MAC */
1640 *(__le32
*) (&(ndev
->dev_addr
[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
1641 *(__le16
*) (&(ndev
->dev_addr
[4])) = cpu_to_le16((u16
) bfin_read_EMAC_ADDRHI());
1644 /*todo: how to proble? which is revision_register */
1645 bfin_write_EMAC_ADDRLO(0x12345678);
1646 if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
1647 dev_err(&pdev
->dev
, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
1649 goto out_err_probe_mac
;
1654 * Is it valid? (Did bootloader initialize it?)
1655 * Grab the MAC from the board somehow
1656 * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
1658 if (!is_valid_ether_addr(ndev
->dev_addr
)) {
1659 if (bfin_get_ether_addr(ndev
->dev_addr
) ||
1660 !is_valid_ether_addr(ndev
->dev_addr
)) {
1661 /* Still not valid, get a random one */
1662 netdev_warn(ndev
, "Setting Ethernet MAC to a random one\n");
1663 eth_hw_addr_random(ndev
);
1667 setup_mac_addr(ndev
->dev_addr
);
1669 if (!dev_get_platdata(&pdev
->dev
)) {
1670 dev_err(&pdev
->dev
, "Cannot get platform device bfin_mii_bus!\n");
1672 goto out_err_probe_mac
;
1674 pd
= dev_get_platdata(&pdev
->dev
);
1675 lp
->mii_bus
= platform_get_drvdata(pd
);
1677 dev_err(&pdev
->dev
, "Cannot get mii_bus!\n");
1679 goto out_err_probe_mac
;
1681 lp
->mii_bus
->priv
= ndev
;
1682 mii_bus_data
= dev_get_platdata(&pd
->dev
);
1684 rc
= mii_probe(ndev
, mii_bus_data
->phy_mode
);
1686 dev_err(&pdev
->dev
, "MII Probe failed!\n");
1687 goto out_err_mii_probe
;
1690 lp
->vlan1_mask
= ETH_P_8021Q
| mii_bus_data
->vlan1_mask
;
1691 lp
->vlan2_mask
= ETH_P_8021Q
| mii_bus_data
->vlan2_mask
;
1693 ndev
->netdev_ops
= &bfin_mac_netdev_ops
;
1694 ndev
->ethtool_ops
= &bfin_mac_ethtool_ops
;
1696 init_timer(&lp
->tx_reclaim_timer
);
1697 lp
->tx_reclaim_timer
.data
= (unsigned long)lp
;
1698 lp
->tx_reclaim_timer
.function
= tx_reclaim_skb_timeout
;
1701 netif_napi_add(ndev
, &lp
->napi
, bfin_mac_poll
, CONFIG_BFIN_RX_DESC_NUM
);
1703 spin_lock_init(&lp
->lock
);
1705 /* now, enable interrupts */
1706 /* register irq handler */
1707 rc
= request_irq(IRQ_MAC_RX
, bfin_mac_interrupt
,
1708 0, "EMAC_RX", ndev
);
1710 dev_err(&pdev
->dev
, "Cannot request Blackfin MAC RX IRQ!\n");
1712 goto out_err_request_irq
;
1715 rc
= register_netdev(ndev
);
1717 dev_err(&pdev
->dev
, "Cannot register net device!\n");
1718 goto out_err_reg_ndev
;
1721 bfin_mac_hwtstamp_init(ndev
);
1722 rc
= bfin_phc_init(ndev
, &pdev
->dev
);
1724 dev_err(&pdev
->dev
, "Cannot register PHC device!\n");
1728 /* now, print out the card info, in a short format.. */
1729 netdev_info(ndev
, "%s, Version %s\n", DRV_DESC
, DRV_VERSION
);
1735 free_irq(IRQ_MAC_RX
, ndev
);
1736 out_err_request_irq
:
1737 netif_napi_del(&lp
->napi
);
1739 mdiobus_unregister(lp
->mii_bus
);
1740 mdiobus_free(lp
->mii_bus
);
1747 static int bfin_mac_remove(struct platform_device
*pdev
)
1749 struct net_device
*ndev
= platform_get_drvdata(pdev
);
1750 struct bfin_mac_local
*lp
= netdev_priv(ndev
);
1752 bfin_phc_release(lp
);
1754 lp
->mii_bus
->priv
= NULL
;
1756 unregister_netdev(ndev
);
1758 netif_napi_del(&lp
->napi
);
1760 free_irq(IRQ_MAC_RX
, ndev
);
1768 static int bfin_mac_suspend(struct platform_device
*pdev
, pm_message_t mesg
)
1770 struct net_device
*net_dev
= platform_get_drvdata(pdev
);
1771 struct bfin_mac_local
*lp
= netdev_priv(net_dev
);
1774 bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE
) | RE
);
1775 bfin_write_EMAC_WKUP_CTL(MPKE
);
1776 enable_irq_wake(IRQ_MAC_WAKEDET
);
1778 if (netif_running(net_dev
))
1779 bfin_mac_close(net_dev
);
1785 static int bfin_mac_resume(struct platform_device
*pdev
)
1787 struct net_device
*net_dev
= platform_get_drvdata(pdev
);
1788 struct bfin_mac_local
*lp
= netdev_priv(net_dev
);
1791 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE
);
1792 bfin_write_EMAC_WKUP_CTL(0);
1793 disable_irq_wake(IRQ_MAC_WAKEDET
);
1795 if (netif_running(net_dev
))
1796 bfin_mac_open(net_dev
);
1802 #define bfin_mac_suspend NULL
1803 #define bfin_mac_resume NULL
1804 #endif /* CONFIG_PM */
1806 static int bfin_mii_bus_probe(struct platform_device
*pdev
)
1808 struct mii_bus
*miibus
;
1809 struct bfin_mii_bus_platform_data
*mii_bus_pd
;
1810 const unsigned short *pin_req
;
1813 mii_bus_pd
= dev_get_platdata(&pdev
->dev
);
1815 dev_err(&pdev
->dev
, "No peripherals in platform data!\n");
1820 * We are setting up a network card,
1821 * so set the GPIO pins to Ethernet mode
1823 pin_req
= mii_bus_pd
->mac_peripherals
;
1824 rc
= peripheral_request_list(pin_req
, KBUILD_MODNAME
);
1826 dev_err(&pdev
->dev
, "Requesting peripherals failed!\n");
1831 miibus
= mdiobus_alloc();
1834 miibus
->read
= bfin_mdiobus_read
;
1835 miibus
->write
= bfin_mdiobus_write
;
1837 miibus
->parent
= &pdev
->dev
;
1838 miibus
->name
= "bfin_mii_bus";
1839 miibus
->phy_mask
= mii_bus_pd
->phy_mask
;
1841 snprintf(miibus
->id
, MII_BUS_ID_SIZE
, "%s-%x",
1842 pdev
->name
, pdev
->id
);
1843 miibus
->irq
= kmalloc(sizeof(int)*PHY_MAX_ADDR
, GFP_KERNEL
);
1845 goto out_err_irq_alloc
;
1847 for (i
= rc
; i
< PHY_MAX_ADDR
; ++i
)
1848 miibus
->irq
[i
] = PHY_POLL
;
1850 rc
= clamp(mii_bus_pd
->phydev_number
, 0, PHY_MAX_ADDR
);
1851 if (rc
!= mii_bus_pd
->phydev_number
)
1852 dev_err(&pdev
->dev
, "Invalid number (%i) of phydevs\n",
1853 mii_bus_pd
->phydev_number
);
1854 for (i
= 0; i
< rc
; ++i
) {
1855 unsigned short phyaddr
= mii_bus_pd
->phydev_data
[i
].addr
;
1856 if (phyaddr
< PHY_MAX_ADDR
)
1857 miibus
->irq
[phyaddr
] = mii_bus_pd
->phydev_data
[i
].irq
;
1860 "Invalid PHY address %i for phydev %i\n",
1864 rc
= mdiobus_register(miibus
);
1866 dev_err(&pdev
->dev
, "Cannot register MDIO bus!\n");
1867 goto out_err_mdiobus_register
;
1870 platform_set_drvdata(pdev
, miibus
);
1873 out_err_mdiobus_register
:
1876 mdiobus_free(miibus
);
1878 peripheral_free_list(pin_req
);
1883 static int bfin_mii_bus_remove(struct platform_device
*pdev
)
1885 struct mii_bus
*miibus
= platform_get_drvdata(pdev
);
1886 struct bfin_mii_bus_platform_data
*mii_bus_pd
=
1887 dev_get_platdata(&pdev
->dev
);
1889 mdiobus_unregister(miibus
);
1891 mdiobus_free(miibus
);
1892 peripheral_free_list(mii_bus_pd
->mac_peripherals
);
1897 static struct platform_driver bfin_mii_bus_driver
= {
1898 .probe
= bfin_mii_bus_probe
,
1899 .remove
= bfin_mii_bus_remove
,
1901 .name
= "bfin_mii_bus",
1905 static struct platform_driver bfin_mac_driver
= {
1906 .probe
= bfin_mac_probe
,
1907 .remove
= bfin_mac_remove
,
1908 .resume
= bfin_mac_resume
,
1909 .suspend
= bfin_mac_suspend
,
1911 .name
= KBUILD_MODNAME
,
1915 static int __init
bfin_mac_init(void)
1918 ret
= platform_driver_register(&bfin_mii_bus_driver
);
1920 return platform_driver_register(&bfin_mac_driver
);
1924 module_init(bfin_mac_init
);
1926 static void __exit
bfin_mac_cleanup(void)
1928 platform_driver_unregister(&bfin_mac_driver
);
1929 platform_driver_unregister(&bfin_mii_bus_driver
);
1932 module_exit(bfin_mac_cleanup
);