/*  SuperH Ethernet device driver
 *
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2013 Renesas Solutions Corp.
 *  Copyright (C) 2013 Cogent Embedded, Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,
};
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRL31]	= 0x01fc,
};
static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
	if (mdp->reg_offset == sh_eth_offset_gigabit)
		return 1;
	else
		return 0;
}
static void __maybe_unused sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		pr_warn("PHY interface mode was not set up. Defaulting to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}
static void __maybe_unused sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
/* There is CPU dependent code */
#if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779)
static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	}
}

static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	}
}

static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES	1

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	}
}

static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= 0x00000001,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.irq_flags	= IRQF_SHARED,

	.rpadir_value	= 2 << 16,
};
#define SH_GIGA_ETH_BASE	0xfee00000
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}
static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	}
}
/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,
	.fdr_value	= 0x0000072f,
	.rmcr_value	= 0x00000001,

	.irq_flags	= IRQF_SHARED,

	.rpadir_value	= 2 << 16,
};
static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return &sh_eth_my_cpu_data_giga;
	else
		return &sh_eth_my_cpu_data;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	}
}

static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,

#if defined(CONFIG_CPU_SUBTYPE_SH7734)
#else
	.irq_flags	= IRQF_SHARED,
#endif
};
#elif defined(CONFIG_ARCH_R8A7740)
static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);

	sh_eth_select_mii(ndev);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	}
}

static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
#endif
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		pr_err("Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}
static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
			     EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			goto out;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
			     EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
			     EDMR);
	}

out:
	return ret;
}
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif
/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}
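/* Illustrative example (not from the original source): on a CPU running
 * little-endian with mdp->edmac_endian == EDMAC_BIG_ENDIAN,
 * cpu_to_edmac(mdp, 0x12345678) yields 0x78563412, so the descriptor
 * words land in memory in the byte order the big-endian EDMAC expects;
 * edmac_to_cpu() performs the inverse swap when reading status back.
 */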
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
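/* Worked example of the register layout used above: for the address
 * 02:11:22:33:44:55, MAHR is written with 0x02112233 (the first four
 * bytes) and MALR with 0x00004455 (the last two); read_mac_address()
 * below reverses exactly this packing.
 */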
/* Get MAC address from the SuperH MAC address registers
 *
 * The SuperH Ethernet device has no ROM for the MAC address, so this
 * driver picks up whatever address the bootloader (U-Boot or sh-ipl+g)
 * programmed. To use the device, a MAC address must be set in the
 * bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, 6);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}
static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;	/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}
/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}
/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}
/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}
/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}
/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner		= THIS_MODULE,
	.set_mdc	= sh_mdc_ctrl,
	.set_mdio_dir	= sh_mmd_ctrl,
	.set_mdio_data	= sh_set_mdio,
	.get_mdio_data	= sh_get_mdio,
};
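/* How these callbacks are driven (a sketch of the mdio-bitbang contract,
 * not code from this file): the mdiobb library serializes each MII
 * management frame by calling set_mdio_dir() to turn the bus around,
 * set_mdio_data() to present one bit, and then pulsing the clock via
 * set_mdc(); get_mdio_data() samples the PHY's reply bit.  In this
 * driver all four signals are single bits of the one PIR register,
 * selected by the PIR_MMD/PIR_MDO/PIR_MDI/PIR_MDC masks assigned in
 * sh_mdio_init() below.
 */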
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
			       DMA_FROM_DEVICE);
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The size of the buffer is a 16-byte multiple. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
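/* A note on the ring bookkeeping used throughout this driver (a summary
 * derived from the code above, not original source text): cur_rx/cur_tx
 * and dirty_rx/dirty_tx are free-running counters and the descriptor
 * index is always taken modulo the ring size; e.g. with num_rx_ring ==
 * 64 and cur_rx == 130 the active entry is 130 % 64 == 2.  "cur - dirty"
 * is therefore the number of in-flight descriptors even after the
 * counters wrap.
 */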
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}
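/* Worked example of the rx_buf_sz formula above (illustrative): with an
 * MTU of 1500, ((1500 + 26 + 7) & ~7) + 2 + 16 == 1528 + 18 == 1546
 * bytes per Rx buffer, rounded up again to a 16-byte multiple by
 * sh_eth_ring_format(); an MTU of 1492 or less simply uses PKT_BUF_SZ.
 */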
static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}
static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		goto out;

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	if (start)
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	if (start) {
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

out:
	return ret;
}
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int freeNum = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			freeNum++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += txdesc->buffer_length;
	}
	return freeNum;
}
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

#if defined(CONFIG_ARCH_R8A7740)
		desc_status >>= 16;
#endif

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is a 16-byte multiple. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
				       DMA_FROM_DEVICE);
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE)
			mdp->cur_rx = mdp->dirty_rx =
				(sh_eth_read(ndev, RDFAR) -
				 sh_eth_read(ndev, RDLAR)) >> 4;
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	return 0;
}
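/* Recovery arithmetic used above (a sketch, assuming 16-byte descriptors,
 * which the >> 4 implies): when RDE halts reception, (RDFAR - RDLAR) >> 4
 * converts the byte offset between the controller's next-fetch address
 * and the ring base back into a descriptor index, from which cur_rx and
 * dirty_rx are resynchronized before EDRRR_R restarts the receiver.
 */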
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		(ECMR_RE | ECMR_TE), ECMR);
}
/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				goto ignore_link;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				sh_eth_rcv_snd_disable(ndev);
			} else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					     ~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					     ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					     DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

ignore_link:
	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			if (netif_msg_tx_err(mdp))
				dev_err(&ndev->dev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;

		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
			intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	unsigned long intr_status;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing ECI interrupt to be always
	 * enabled since it's the one that comes thru regardless of the mask,
	 * and we need to fully handle it in sh_eth_error() in order to quench
	 * it as it doesn't get cleared by just writing 1 to the ECI bit...
	 */
	intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
	/* Clear interrupt */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			   EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			   cd->tx_check | cd->eesr_err_check)) {
		sh_eth_write(ndev, intr_status, EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;

	if (intr_status & (EESR_FRC |	/* Frame recv */
			   EESR_RMAF |	/* Multicast address recv */
			   EESR_RRF |	/* Bit frame recv */
			   EESR_RTLF |	/* Long frame recv */
			   EESR_RTSF |	/* Short frame recv */
			   EESR_PRE |	/* PHY-LSI recv error */
			   EESR_CERF)) {	/* Recv frame CRC error */
		sh_eth_rx(ndev, intr_status);
	}

	/* Tx done */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (!mdp->link) {
			sh_eth_write(ndev,
				     (sh_eth_read(ndev, ECMR) & ~ECMR_TXF),
				     ECMR);
			new_state = 1;
			mdp->link = phydev->link;
			if (mdp->cd->no_psr || mdp->no_ether_link)
				sh_eth_rcv_snd_enable(ndev);
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = 0;
		mdp->speed = 0;
		mdp->duplex = -1;
		if (mdp->cd->no_psr || mdp->no_ether_link)
			sh_eth_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		 mdp->mii_bus->id, mdp->phy_id);

	mdp->link = 0;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
			     mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		 phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}
static int sh_eth_get_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}
static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
		       sizeof(sh_eth_gstrings_stats));
		break;
	}
}
static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}

static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_tx_disable(ndev);
		/* Disable interrupts by clearing the interrupt mask. */
		sh_eth_write(ndev, 0x0000, EESIPR);
		/* Stop the chip's Tx and Rx processes. */
		sh_eth_write(ndev, 0, EDTRR);
		sh_eth_write(ndev, 0, EDRRR);
		synchronize_irq(ndev->irq);
	}

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);
	/* Free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	ret = sh_eth_ring_init(ndev);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
		return ret;
	}
	ret = sh_eth_dev_init(ndev, false);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
		return ret;
	}

	if (netif_running(ndev)) {
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_wake_queue(ndev);
	}

	return 0;
}
static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings	= sh_eth_get_settings,
	.set_settings	= sh_eth_set_settings,
	.nway_reset	= sh_eth_nway_reset,
	.get_msglevel	= sh_eth_get_msglevel,
	.set_msglevel	= sh_eth_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_strings	= sh_eth_get_strings,
	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
	.get_sset_count     = sh_eth_get_sset_count,
	.get_ringparam	= sh_eth_get_ringparam,
	.set_ringparam	= sh_eth_set_ringparam,
};
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
			  mdp->cd->irq_flags, ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Can not assign IRQ number\n");
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev, true);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	if (netif_msg_timer(mdp))
		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
			ndev->name, (int)sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev, true);
}
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &ndev->stats;
}
/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
			   int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
{
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}

static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}

static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* If other port enables, the function returns "true" */
	return tmp & ref_mask;
}
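/* A worked example of the POST indexing above (illustrative only): each
 * TSU_POSTn register packs eight 4-bit fields, one per CAM entry, so
 * entry 10 lives in TSU_POST2 (10 / 8 == 1) and its field occupies bits
 * 23:20 (28 - (10 % 8) * 4 == 20).  Within a field, the bit selected by
 * sh_eth_tsu_get_post_bit() depends on mdp->port: 0x08 >> (0 << 1) ==
 * 0x8 for port 0 and 0x08 >> (1 << 1) == 0x2 for port 1.
 */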
static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			dev_err(&ndev->dev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}

static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}
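/* CAM entry layout implied by the two helpers above (illustrative):
 * entry i occupies a pair of 32-bit registers at TSU_ADRH0 + i * 8; for
 * 02:11:22:33:44:55 the high word is 0x02112233 and the low word
 * 0x00004455, mirroring the MAHR/MALR packing of the E-MAC itself.
 */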
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (memcmp(addr, c_addr, ETH_ALEN) == 0)
			return i;
	}

	return -ENOENT;
}

static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}
static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;
	return 0;
}
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}
static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0)
		goto done;

	if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
		goto done;

	/* Disable the entry if both ports were disabled */
	ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
	if (ret < 0)
		return ret;

done:
	return 0;
}
static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (unlikely(!mdp->cd->tsu))
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (unlikely(!mdp->cd->tsu))
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;
		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}
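/* Mode summary for the ECMR bits chosen above (derived from this
 * function, for reference):
 *   default	  MCT=1, PRM=0 - accept only CAM-listed multicasts;
 *   IFF_ALLMULTI MCT=0, PRM=0 - accept all multicasts;
 *   IFF_PROMISC  MCT=0, PRM=1 - accept everything;
 *   no TSU	  MCT=1, PRM=0 - unicast/broadcast only.
 */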
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}

static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
				  __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/* The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled, the driver disables it and lets all VLAN IDs
	 * through for software filtering.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}

static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
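/* Usage note (illustrative): because the TSU exposes a single VLAN tag
 * filter, adding the first VID programs TSU_VTAG0/TSU_VTAG1 with
 * TSU_VTAG_ENABLE | vid, while adding a second VID writes 0 to the same
 * register, i.e. the hardware filter is abandoned and all tags are
 * passed up for software filtering.
 */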
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}
/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}
/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
			       GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = PIR_MDI;
	bitbang->mdo_msk = PIR_MDO;
	bitbang->mmd_msk = PIR_MMD;
	bitbang->mdc_msk = PIR_MDC;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 mdp->pdev->name, id);

	/* PHY IRQ */
	mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
					 sizeof(int) * PHY_MAX_ADDR,
					 GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_bus;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out:
	return ret;
}
static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		pr_err("Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
}
static struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = pdev->dev.platform_data;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;
	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);

	/* set cpu data */
#if defined(SH_ETH_HAS_BOTH_MODULES)
	mdp->cd = sh_eth_get_cpu_data(mdp);
#else
	mdp->cd = &sh_eth_my_cpu_data;
#endif
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	if (mdp->cd->tsu) {
		sh_eth_netdev_ops.ndo_set_rx_mode = sh_eth_set_multicast_list;
		sh_eth_netdev_ops.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid;
		sh_eth_netdev_ops.ndo_vlan_rx_kill_vid =
			sh_eth_vlan_rx_kill_vid;
	}

	ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;
		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only)*/
			sh_eth_tsu_init(mdp);
		}
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_device free */
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}
#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif
2696 #define SH_ETH_PM_OPS NULL
2699 static struct platform_driver sh_eth_driver
= {
2700 .probe
= sh_eth_drv_probe
,
2701 .remove
= sh_eth_drv_remove
,
2704 .pm
= SH_ETH_PM_OPS
,
2708 module_platform_driver(sh_eth_driver
);
2710 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
2711 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
2712 MODULE_LICENSE("GPL v2");