/* SuperH Ethernet device driver
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2014 Renesas Solutions Corp.
 * Copyright (C) 2013-2014 Cogent Embedded, Inc.
 * Copyright (C) 2014 Codethink Limited
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"
#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)
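
/* Each SoC family places the E-DMAC/E-MAC/TSU registers at different MMIO
 * offsets, so each family gets a lookup table indexed by the logical
 * register enum; sh_eth_read()/sh_eth_write() resolve registers through
 * mdp->reg_offset at run time.
 */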
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
        [TSU_CTRST]     = 0x0004,
        [TSU_FWEN0]     = 0x0010,
        [TSU_FWEN1]     = 0x0014,
        [TSU_BSYSL0]    = 0x0020,
        [TSU_BSYSL1]    = 0x0024,
        [TSU_PRISL0]    = 0x0028,
        [TSU_PRISL1]    = 0x002c,
        [TSU_FWSL0]     = 0x0030,
        [TSU_FWSL1]     = 0x0034,
        [TSU_FWSLC]     = 0x0038,
        [TSU_QTAG0]     = 0x0040,
        [TSU_QTAG1]     = 0x0044,
        [TSU_FWINMK]    = 0x0054,
        [TSU_ADQT0]     = 0x0048,
        [TSU_ADQT1]     = 0x004c,
        [TSU_VTAG0]     = 0x0058,
        [TSU_VTAG1]     = 0x005c,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_POST1]     = 0x0070,
        [TSU_POST2]     = 0x0074,
        [TSU_POST3]     = 0x0078,
        [TSU_POST4]     = 0x007c,
        [TSU_ADRH0]     = 0x0100,
        [TSU_ADRL0]     = 0x0104,
        [TSU_ADRH31]    = 0x01f8,
        [TSU_ADRL31]    = 0x01fc,
};
static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
        [TSU_CTRST]     = 0x0004,
        [TSU_VTAG0]     = 0x0058,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_ADRH0]     = 0x0100,
        [TSU_ADRL0]     = 0x0104,
        [TSU_ADRH31]    = 0x01f8,
        [TSU_ADRL31]    = 0x01fc,
};
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
};
static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
        [TSU_CTRST]     = 0x0004,
        [TSU_FWEN0]     = 0x0010,
        [TSU_FWEN1]     = 0x0014,
        [TSU_BSYSL0]    = 0x0020,
        [TSU_BSYSL1]    = 0x0024,
        [TSU_PRISL0]    = 0x0028,
        [TSU_PRISL1]    = 0x002c,
        [TSU_FWSL0]     = 0x0030,
        [TSU_FWSL1]     = 0x0034,
        [TSU_FWSLC]     = 0x0038,
        [TSU_QTAGM0]    = 0x0040,
        [TSU_QTAGM1]    = 0x0044,
        [TSU_ADQT0]     = 0x0048,
        [TSU_ADQT1]     = 0x004c,
        [TSU_FWINMK]    = 0x0054,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_POST1]     = 0x0070,
        [TSU_POST2]     = 0x0074,
        [TSU_POST3]     = 0x0078,
        [TSU_POST4]     = 0x007c,
        [TSU_ADRH0]     = 0x0100,
        [TSU_ADRL0]     = 0x0104,
        [TSU_ADRL31]    = 0x01fc,
};
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);

static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
        return mdp->reg_offset == sh_eth_offset_gigabit;
}

static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
        return mdp->reg_offset == sh_eth_offset_fast_rz;
}
static void sh_eth_select_mii(struct net_device *ndev)
{
        u32 value = 0x0;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->phy_interface) {
        case PHY_INTERFACE_MODE_GMII:
                value = 0x2;
                break;
        case PHY_INTERFACE_MODE_MII:
                value = 0x1;
                break;
        case PHY_INTERFACE_MODE_RMII:
                value = 0x0;
                break;
        default:
                netdev_warn(ndev,
                            "PHY interface mode was not setup. Set to MII.\n");
                value = 0x1;
                break;
        }

        sh_eth_write(ndev, value, RMII_MII);
}
static void sh_eth_set_duplex(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        if (mdp->duplex) /* Full */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
        else             /* Half */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
/* There is CPU dependent code */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
                break;
        case 100:/* 100BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
                break;
        default:
                break;
        }
}
static struct sh_eth_cpu_data r8a777x_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_r8a777x,

        .register_type  = SH_ETH_REG_FAST_RCAR,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = 0x01ff009f,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .fdr_value      = 0x00000f0f,
};
static struct sh_eth_cpu_data r8a779x_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_r8a777x,

        .register_type  = SH_ETH_REG_FAST_RCAR,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = 0x01ff009f,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .fdr_value      = 0x00000f0f,

        .trscer_err_mask = DESC_I_RINT8,
};
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
                break;
        case 100:/* 100BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
                break;
        default:
                break;
        }
}
static struct sh_eth_cpu_data sh7724_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_sh7724,

        .register_type  = SH_ETH_REG_FAST_SH4,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = 0x01ff009f,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, 0, RTRATE);
                break;
        case 100:/* 100BASE */
                sh_eth_write(ndev, 1, RTRATE);
                break;
        default:
                break;
        }
}
static struct sh_eth_cpu_data sh7757_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_sh7757,

        .register_type  = SH_ETH_REG_FAST_SH4,

        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .irq_flags      = IRQF_SHARED,

        .rpadir_value   = 2 << 16,
};
#define SH_GIGA_ETH_BASE        0xfee00000UL
#define GIGA_MALR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
        int i;
        u32 mahr[2], malr[2];

        /* save MAHR and MALR */
        for (i = 0; i < 2; i++) {
                malr[i] = ioread32((void *)GIGA_MALR(i));
                mahr[i] = ioread32((void *)GIGA_MAHR(i));
        }

        /* reset device */
        iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
        mdelay(1);

        /* restore MAHR and MALR */
        for (i = 0; i < 2; i++) {
                iowrite32(malr[i], (void *)GIGA_MALR(i));
                iowrite32(mahr[i], (void *)GIGA_MAHR(i));
        }
}
static void sh_eth_set_rate_giga(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, 0x00000000, GECMR);
                break;
        case 100:/* 100BASE */
                sh_eth_write(ndev, 0x00000010, GECMR);
                break;
        case 1000: /* 1000BASE */
                sh_eth_write(ndev, 0x00000020, GECMR);
                break;
        default:
                break;
        }
}
/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
        .chip_reset     = sh_eth_chip_reset_giga,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_giga,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,

        .fdr_value      = 0x0000072f,

        .irq_flags      = IRQF_SHARED,

        .rpadir_value   = 2 << 16,
};
static void sh_eth_chip_reset(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        /* reset device */
        sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
        mdelay(1);
}
static void sh_eth_set_rate_gether(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, GECMR_10, GECMR);
                break;
        case 100:/* 100BASE */
                sh_eth_write(ndev, GECMR_100, GECMR);
                break;
        case 1000: /* 1000BASE */
                sh_eth_write(ndev, GECMR_1000, GECMR);
                break;
        default:
                break;
        }
}
static struct sh_eth_cpu_data sh7734_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,
};
static struct sh_eth_cpu_data sh7763_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .irq_flags      = IRQF_SHARED,
};
static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        /* reset device */
        sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
        mdelay(1);

        sh_eth_select_mii(ndev);
}
static struct sh_eth_cpu_data r8a7740_data = {
        .chip_reset     = sh_eth_chip_reset_r8a7740,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,

        .fdr_value      = 0x0000070f,

        .rpadir_value   = 2 << 16,
};
static struct sh_eth_cpu_data r7s72100_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,

        .register_type  = SH_ETH_REG_FAST_RZ,

        .ecsr_value     = ECSR_ICD,
        .ecsipr_value   = ECSIPR_ICDIP,
        .eesipr_value   = 0xff7f009f,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,

        .fdr_value      = 0x0000070f,

        .rpadir_value   = 2 << 16,
};
static struct sh_eth_cpu_data sh7619_data = {
        .register_type  = SH_ETH_REG_FAST_SH3_SH2,

        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};

static struct sh_eth_cpu_data sh771x_data = {
        .register_type  = SH_ETH_REG_FAST_SH3_SH2,

        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
        if (!cd->ecsr_value)
                cd->ecsr_value = DEFAULT_ECSR_INIT;

        if (!cd->ecsipr_value)
                cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

        if (!cd->fcftr_value)
                cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
                                  DEFAULT_FIFO_F_D_RFD;

        if (!cd->fdr_value)
                cd->fdr_value = DEFAULT_FDR_INIT;

        if (!cd->tx_check)
                cd->tx_check = DEFAULT_TX_CHECK;

        if (!cd->eesr_err_check)
                cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

        if (!cd->trscer_err_mask)
                cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}
static int sh_eth_check_reset(struct net_device *ndev)
{
        int ret = 0;
        int cnt = 100;

        while (cnt > 0) {
                if (!(sh_eth_read(ndev, EDMR) & 0x3))
                        break;
                mdelay(1);
                cnt--;
        }
        if (cnt <= 0) {
                netdev_err(ndev, "Device reset failed\n");
                ret = -ETIMEDOUT;
        }
        return ret;
}
static int sh_eth_reset(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret = 0;

        if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
                sh_eth_write(ndev, EDSR_ENALL, EDSR);
                sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
                             EDMR);

                ret = sh_eth_check_reset(ndev);
                if (ret)
                        return ret;

                /* Table Init */
                sh_eth_write(ndev, 0x0, TDLAR);
                sh_eth_write(ndev, 0x0, TDFAR);
                sh_eth_write(ndev, 0x0, TDFXR);
                sh_eth_write(ndev, 0x0, TDFFR);
                sh_eth_write(ndev, 0x0, RDLAR);
                sh_eth_write(ndev, 0x0, RDFAR);
                sh_eth_write(ndev, 0x0, RDFXR);
                sh_eth_write(ndev, 0x0, RDFFR);

                /* Reset HW CRC register */
                if (mdp->cd->hw_crc)
                        sh_eth_write(ndev, 0x0, CSMR);

                /* Select MII mode */
                if (mdp->cd->select_mii)
                        sh_eth_select_mii(ndev);
        } else {
                sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
                             EDMR);
                mdelay(3);
                sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
                             EDMR);
        }

        return ret;
}
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
        uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

        if (reserve)
                skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}
/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
        switch (mdp->edmac_endian) {
        case EDMAC_LITTLE_ENDIAN:
                return cpu_to_le32(x);
        case EDMAC_BIG_ENDIAN:
                return cpu_to_be32(x);
        }
        return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
        switch (mdp->edmac_endian) {
        case EDMAC_LITTLE_ENDIAN:
                return le32_to_cpu(x);
        case EDMAC_BIG_ENDIAN:
                return be32_to_cpu(x);
        }
        return x;
}
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
        sh_eth_write(ndev,
                     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
                     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
        sh_eth_write(ndev,
                     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
/* Get MAC address from the SuperH MAC address registers
 *
 * The SuperH Ethernet controller has no ROM for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g). To use this device, the MAC address must be
 * set in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
        if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
                memcpy(ndev->dev_addr, mac, ETH_ALEN);
        } else {
                ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
                ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
                ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
                ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
                ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
                ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
        }
}
static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
        if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
                return EDTRR_TRNS_GETHER;
        else
                return EDTRR_TRNS_ETHER;
}
struct bb_info {
        void (*set_gate)(void *addr);
        struct mdiobb_ctrl ctrl;
        void *addr;
        u32 mmd_msk;/* MMD */
        u32 mdo_msk;
        u32 mdi_msk;
        u32 mdc_msk;
};
static void bb_set(void *addr, u32 msk)
{
        iowrite32(ioread32(addr) | msk, addr);
}

static void bb_clr(void *addr, u32 msk)
{
        iowrite32((ioread32(addr) & ~msk), addr);
}

static int bb_read(void *addr, u32 msk)
{
        return (ioread32(addr) & msk) != 0;
}
/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        if (bit)
                bb_set(bitbang->addr, bitbang->mmd_msk);
        else
                bb_clr(bitbang->addr, bitbang->mmd_msk);
}
/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        if (bit)
                bb_set(bitbang->addr, bitbang->mdo_msk);
        else
                bb_clr(bitbang->addr, bitbang->mdo_msk);
}
/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        return bb_read(bitbang->addr, bitbang->mdi_msk);
}
/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        if (bit)
                bb_set(bitbang->addr, bitbang->mdc_msk);
        else
                bb_clr(bitbang->addr, bitbang->mdc_msk);
}
/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
        .owner          = THIS_MODULE,
        .set_mdc        = sh_mdc_ctrl,
        .set_mdio_dir   = sh_mmd_ctrl,
        .set_mdio_data  = sh_set_mdio,
        .get_mdio_data  = sh_get_mdio,
};
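
/* MDIO on these parts is bit-banged through the PIR register: the generic
 * mdio-bitbang layer drives the MDC/MDIO pins via the callbacks above,
 * using the pin masks stored in bb_info.
 */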
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;

        /* Free Rx skb ringbuffer */
        if (mdp->rx_skbuff) {
                for (i = 0; i < mdp->num_rx_ring; i++)
                        dev_kfree_skb(mdp->rx_skbuff[i]);
        }
        kfree(mdp->rx_skbuff);
        mdp->rx_skbuff = NULL;

        /* Free Tx skb ringbuffer */
        if (mdp->tx_skbuff) {
                for (i = 0; i < mdp->num_tx_ring; i++)
                        dev_kfree_skb(mdp->tx_skbuff[i]);
        }
        kfree(mdp->tx_skbuff);
        mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;
        struct sk_buff *skb;
        struct sh_eth_rxdesc *rxdesc = NULL;
        struct sh_eth_txdesc *txdesc = NULL;
        int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
        int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
        int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
        dma_addr_t dma_addr;

        mdp->cur_rx = 0;
        mdp->cur_tx = 0;
        mdp->dirty_rx = 0;
        mdp->dirty_tx = 0;

        memset(mdp->rx_ring, 0, rx_ringsize);

        /* build Rx ring buffer */
        for (i = 0; i < mdp->num_rx_ring; i++) {
                /* skb */
                mdp->rx_skbuff[i] = NULL;
                skb = netdev_alloc_skb(ndev, skbuff_size);
                if (skb == NULL)
                        break;
                sh_eth_set_receive_align(skb);

                /* RX descriptor */
                rxdesc = &mdp->rx_ring[i];
                /* The size of the buffer is a multiple of 16 bytes. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
                dma_addr = dma_map_single(&ndev->dev, skb->data,
                                          rxdesc->buffer_length,
                                          DMA_FROM_DEVICE);
                if (dma_mapping_error(&ndev->dev, dma_addr)) {
                        kfree_skb(skb);
                        break;
                }
                mdp->rx_skbuff[i] = skb;
                rxdesc->addr = dma_addr;
                rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

                /* Rx descriptor address set */
                if (i == 0) {
                        sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
                        if (sh_eth_is_gether(mdp) ||
                            sh_eth_is_rz_fast_ether(mdp))
                                sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
                }
        }

        mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

        /* Mark the last entry as wrapping the ring. */
        rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

        memset(mdp->tx_ring, 0, tx_ringsize);

        /* build Tx ring buffer */
        for (i = 0; i < mdp->num_tx_ring; i++) {
                mdp->tx_skbuff[i] = NULL;
                txdesc = &mdp->tx_ring[i];
                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
                txdesc->buffer_length = 0;
                if (i == 0) {
                        /* Tx descriptor address set */
                        sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
                        if (sh_eth_is_gether(mdp) ||
                            sh_eth_is_rz_fast_ether(mdp))
                                sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
                }
        }

        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
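
/* cur_rx/cur_tx and dirty_rx/dirty_tx are free-running counters: the ring
 * index is always taken modulo the ring size, and cur - dirty is the number
 * of descriptors currently outstanding.  Setting dirty_rx to i - num_rx_ring
 * above preserves that invariant even when the Rx ring could not be fully
 * populated.
 */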
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int rx_ringsize, tx_ringsize, ret = 0;

        /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
         * card needs room to do 8 byte alignment, +2 so we can reserve
         * the first 2 bytes, and +16 gets room for the status word from the
         * card.
         */
        mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
                          (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
        if (mdp->cd->rpadir)
                mdp->rx_buf_sz += NET_IP_ALIGN;

        /* Allocate RX and TX skb rings */
        mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
                                       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
        if (!mdp->rx_skbuff) {
                ret = -ENOMEM;
                return ret;
        }

        mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
                                       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
        if (!mdp->tx_skbuff) {
                ret = -ENOMEM;
                goto skb_ring_free;
        }

        /* Allocate all Rx descriptors. */
        rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
        mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
                                          GFP_KERNEL);
        if (!mdp->rx_ring) {
                ret = -ENOMEM;
                goto desc_ring_free;
        }

        mdp->dirty_rx = 0;

        /* Allocate all Tx descriptors. */
        tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
        mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
                                          GFP_KERNEL);
        if (!mdp->tx_ring) {
                ret = -ENOMEM;
                goto desc_ring_free;
        }
        return ret;

desc_ring_free:
        /* free DMA buffer */
        dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
        /* Free Rx and Tx skb ring buffer */
        sh_eth_ring_free(ndev);
        mdp->tx_ring = NULL;
        mdp->rx_ring = NULL;

        return ret;
}
static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
        int ringsize;

        if (mdp->rx_ring) {
                ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
                dma_free_coherent(NULL, ringsize, mdp->rx_ring,
                                  mdp->rx_desc_dma);
                mdp->rx_ring = NULL;
        }

        if (mdp->tx_ring) {
                ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
                dma_free_coherent(NULL, ringsize, mdp->tx_ring,
                                  mdp->tx_desc_dma);
                mdp->tx_ring = NULL;
        }
}
static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
        int ret = 0;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 val;

        /* Soft Reset */
        ret = sh_eth_reset(ndev);
        if (ret)
                return ret;

        if (mdp->cd->rmiimode)
                sh_eth_write(ndev, 0x1, RMIIMODE);

        /* Descriptor format */
        sh_eth_ring_format(ndev);
        if (mdp->cd->rpadir)
                sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

        /* all sh_eth int mask */
        sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
        if (mdp->cd->hw_swap)
                sh_eth_write(ndev, EDMR_EL, EDMR);
        else
#endif
                sh_eth_write(ndev, 0, EDMR);

        /* FIFO size set */
        sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
        sh_eth_write(ndev, 0, TFTR);

        /* Frame recv control (enable multiple-packets per rx irq) */
        sh_eth_write(ndev, RMCR_RNC, RMCR);

        sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

        if (mdp->cd->bculr)
                sh_eth_write(ndev, 0x800, BCULR);       /* Burst sycle set */

        sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

        if (!mdp->cd->no_trimd)
                sh_eth_write(ndev, 0, TRIMD);

        /* Recv frame limit set register */
        sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
                     RFLR);

        sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
        if (start) {
                mdp->irq_enabled = true;
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
        }

        /* PAUSE Prohibition */
        val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
                ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

        sh_eth_write(ndev, val, ECMR);

        if (mdp->cd->set_rate)
                mdp->cd->set_rate(ndev);

        /* E-MAC Status Register clear */
        sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

        /* E-MAC Interrupt Enable register */
        if (start)
                sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

        /* Set MAC address */
        update_mac_address(ndev);

        /* mask reset */
        if (mdp->cd->apr)
                sh_eth_write(ndev, APR_AP, APR);
        if (mdp->cd->mpr)
                sh_eth_write(ndev, MPR_MP, MPR);
        if (mdp->cd->tpauser)
                sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

        if (start) {
                /* Setting the Rx mode will start the Rx process. */
                sh_eth_write(ndev, EDRRR_R, EDRRR);

                netif_start_queue(ndev);
        }

        return ret;
}
static void sh_eth_dev_exit(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;

        /* Deactivate all TX descriptors, so DMA should stop at next
         * packet boundary if it's currently running
         */
        for (i = 0; i < mdp->num_tx_ring; i++)
                mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);

        /* Disable TX FIFO egress to MAC */
        sh_eth_rcv_snd_disable(ndev);

        /* Stop RX DMA at next packet boundary */
        sh_eth_write(ndev, 0, EDRRR);

        /* Aside from TX DMA, we can't tell when the hardware is
         * really stopped, so we need to reset to make sure.
         * Before doing that, wait for long enough to *probably*
         * finish transmitting the last packet and poll stats.
         */
        msleep(2); /* max frame time at 10 Mbps < 1250 us */
        sh_eth_get_stats(ndev);
        sh_eth_reset(ndev);
}
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_txdesc *txdesc;
        int free_num = 0;
        int entry = 0;

        for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
                entry = mdp->dirty_tx % mdp->num_tx_ring;
                txdesc = &mdp->tx_ring[entry];
                if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
                        break;
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
                        dma_unmap_single(&ndev->dev, txdesc->addr,
                                         txdesc->buffer_length, DMA_TO_DEVICE);
                        dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
                        mdp->tx_skbuff[entry] = NULL;
                        free_num++;
                }
                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
                if (entry >= mdp->num_tx_ring - 1)
                        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

                ndev->stats.tx_packets++;
                ndev->stats.tx_bytes += txdesc->buffer_length;
        }
        return free_num;
}
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_rxdesc *rxdesc;

        int entry = mdp->cur_rx % mdp->num_rx_ring;
        int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
        int limit;
        struct sk_buff *skb;
        u16 pkt_len = 0;
        u32 desc_status;
        int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
        dma_addr_t dma_addr;

        boguscnt = min(boguscnt, *quota);
        limit = boguscnt;
        rxdesc = &mdp->rx_ring[entry];
        while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
                desc_status = edmac_to_cpu(mdp, rxdesc->status);
                pkt_len = rxdesc->frame_length;

                if (--boguscnt < 0)
                        break;

                if (!(desc_status & RDFEND))
                        ndev->stats.rx_length_errors++;

                /* In case of almost all GETHER/ETHERs, the Receive Frame State
                 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
                 * bit 0. However, in case of the R8A7740, R8A779x, and
                 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
                 * driver needs right shifting by 16.
                 */
                if (mdp->cd->shift_rd0)
                        desc_status >>= 16;

                if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
                                   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
                        ndev->stats.rx_errors++;
                        if (desc_status & RD_RFS1)
                                ndev->stats.rx_crc_errors++;
                        if (desc_status & RD_RFS2)
                                ndev->stats.rx_frame_errors++;
                        if (desc_status & RD_RFS3)
                                ndev->stats.rx_length_errors++;
                        if (desc_status & RD_RFS4)
                                ndev->stats.rx_length_errors++;
                        if (desc_status & RD_RFS6)
                                ndev->stats.rx_missed_errors++;
                        if (desc_status & RD_RFS10)
                                ndev->stats.rx_over_errors++;
                } else {
                        if (!mdp->cd->hw_swap)
                                sh_eth_soft_swap(
                                        phys_to_virt(ALIGN(rxdesc->addr, 4)),
                                        pkt_len + 2);
                        skb = mdp->rx_skbuff[entry];
                        mdp->rx_skbuff[entry] = NULL;
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
                        dma_unmap_single(&ndev->dev, rxdesc->addr,
                                         ALIGN(mdp->rx_buf_sz, 16),
                                         DMA_FROM_DEVICE);
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
                        netif_receive_skb(skb);
                        ndev->stats.rx_packets++;
                        ndev->stats.rx_bytes += pkt_len;
                }
                entry = (++mdp->cur_rx) % mdp->num_rx_ring;
                rxdesc = &mdp->rx_ring[entry];
        }

        /* Refill the Rx ring buffers. */
        for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
                entry = mdp->dirty_rx % mdp->num_rx_ring;
                rxdesc = &mdp->rx_ring[entry];
                /* The size of the buffer is 16 byte boundary. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

                if (mdp->rx_skbuff[entry] == NULL) {
                        skb = netdev_alloc_skb(ndev, skbuff_size);
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        sh_eth_set_receive_align(skb);
                        dma_addr = dma_map_single(&ndev->dev, skb->data,
                                                  rxdesc->buffer_length,
                                                  DMA_FROM_DEVICE);
                        if (dma_mapping_error(&ndev->dev, dma_addr)) {
                                kfree_skb(skb);
                                break;
                        }
                        mdp->rx_skbuff[entry] = skb;

                        skb_checksum_none_assert(skb);
                        rxdesc->addr = dma_addr;
                }
                if (entry >= mdp->num_rx_ring - 1)
                        rxdesc->status |=
                                cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
                else
                        rxdesc->status |=
                                cpu_to_edmac(mdp, RD_RACT | RD_RFP);
        }

        /* Restart Rx engine if stopped. */
        /* If we don't need to check status, don't. -KDU */
        if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
                /* fix the values for the next receiving if RDE is set */
                if (intr_status & EESR_RDE) {
                        u32 count = (sh_eth_read(ndev, RDFAR) -
                                     sh_eth_read(ndev, RDLAR)) >> 4;

                        mdp->cur_rx = count;
                        mdp->dirty_rx = count;
                }
                sh_eth_write(ndev, EDRRR_R, EDRRR);
        }

        *quota -= limit - boguscnt - 1;

        return *quota <= 0;
}
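
/* The (RDFAR - RDLAR) >> 4 computation above recovers the index of the next
 * descriptor the EDMAC will fetch: each Rx descriptor is 16 bytes, so the
 * byte distance between the fetch address and the ring base divides by 16.
 */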
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
        /* disable tx and rx */
        sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
                ~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
        /* enable tx and rx */
        sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
                (ECMR_RE | ECMR_TE), ECMR);
}
/* error control function */
static void sh_eth_error(struct net_device *ndev, u32 intr_status)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 felic_stat;
        u32 link_stat;
        u32 mask;

        if (intr_status & EESR_ECI) {
                felic_stat = sh_eth_read(ndev, ECSR);
                sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
                if (felic_stat & ECSR_ICD)
                        ndev->stats.tx_carrier_errors++;
                if (felic_stat & ECSR_LCHNG) {
                        /* Link Changed */
                        if (mdp->cd->no_psr || mdp->no_ether_link) {
                                goto ignore_link;
                        } else {
                                link_stat = (sh_eth_read(ndev, PSR));
                                if (mdp->ether_link_active_low)
                                        link_stat = ~link_stat;
                        }
                        if (!(link_stat & PHY_ST_LINK)) {
                                sh_eth_rcv_snd_disable(ndev);
                        } else {
                                /* Link Up */
                                sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
                                                   ~DMAC_M_ECI, EESIPR);
                                /* clear int */
                                sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
                                             ECSR);
                                sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
                                                   DMAC_M_ECI, EESIPR);
                                /* enable tx and rx */
                                sh_eth_rcv_snd_enable(ndev);
                        }
                }
        }

ignore_link:
        if (intr_status & EESR_TWB) {
                /* Unused write back interrupt */
                if (intr_status & EESR_TABT) {  /* Transmit Abort int */
                        ndev->stats.tx_aborted_errors++;
                        netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
                }
        }

        if (intr_status & EESR_RABT) {
                /* Receive Abort int */
                if (intr_status & EESR_RFRMER) {
                        /* Receive Frame Overflow int */
                        ndev->stats.rx_frame_errors++;
                }
        }

        if (intr_status & EESR_TDE) {
                /* Transmit Descriptor Empty int */
                ndev->stats.tx_fifo_errors++;
                netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
        }

        if (intr_status & EESR_TFE) {
                /* FIFO under flow */
                ndev->stats.tx_fifo_errors++;
                netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
        }

        if (intr_status & EESR_RDE) {
                /* Receive Descriptor Empty int */
                ndev->stats.rx_over_errors++;
        }

        if (intr_status & EESR_RFE) {
                /* Receive FIFO Overflow int */
                ndev->stats.rx_fifo_errors++;
        }

        if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
                /* Address Error */
                ndev->stats.tx_fifo_errors++;
                netif_err(mdp, tx_err, ndev, "Address Error\n");
        }

        mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
        if (mdp->cd->no_ade)
                mask &= ~EESR_ADE;
        if (intr_status & mask) {
                /* Tx error */
                u32 edtrr = sh_eth_read(ndev, EDTRR);

                /* dmesg */
                netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
                           intr_status, mdp->cur_tx, mdp->dirty_tx,
                           (u32)ndev->state, edtrr);
                /* dirty buffer free */
                sh_eth_txfree(ndev);

                /* SH7712 BUG */
                if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
                        /* tx dma start */
                        sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
                }
                /* wakeup */
                netif_wake_queue(ndev);
        }
}
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
        struct net_device *ndev = netdev;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_cpu_data *cd = mdp->cd;
        irqreturn_t ret = IRQ_NONE;
        u32 intr_status, intr_enable;

        spin_lock(&mdp->lock);

        /* Get interrupt status */
        intr_status = sh_eth_read(ndev, EESR);
        /* Mask it with the interrupt mask, forcing ECI interrupt to be always
         * enabled since it's the one that comes thru regardless of the mask,
         * and we need to fully handle it in sh_eth_error() in order to quench
         * it as it doesn't get cleared by just writing 1 to the ECI bit...
         */
        intr_enable = sh_eth_read(ndev, EESIPR);
        intr_status &= intr_enable | DMAC_M_ECI;
        if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
                ret = IRQ_HANDLED;
        else
                goto out;

        if (!likely(mdp->irq_enabled)) {
                sh_eth_write(ndev, 0, EESIPR);
                goto out;
        }

        if (intr_status & EESR_RX_CHECK) {
                if (napi_schedule_prep(&mdp->napi)) {
                        /* Mask Rx interrupts */
                        sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
                                     EESIPR);
                        __napi_schedule(&mdp->napi);
                } else {
                        netdev_warn(ndev,
                                    "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
                                    intr_status, intr_enable);
                }
        }

        /* Tx interrupt */
        if (intr_status & cd->tx_check) {
                /* Clear Tx interrupts */
                sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

                sh_eth_txfree(ndev);
                netif_wake_queue(ndev);
        }

        if (intr_status & cd->eesr_err_check) {
                /* Clear error interrupts */
                sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

                sh_eth_error(ndev, intr_status);
        }

out:
        spin_unlock(&mdp->lock);

        return ret;
}
static int sh_eth_poll(struct napi_struct *napi, int budget)
{
        struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
                                                  napi);
        struct net_device *ndev = napi->dev;
        int quota = budget;
        u32 intr_status;

        for (;;) {
                intr_status = sh_eth_read(ndev, EESR);
                if (!(intr_status & EESR_RX_CHECK))
                        break;
                /* Clear Rx interrupts */
                sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

                if (sh_eth_rx(ndev, intr_status, &quota))
                        goto out;
        }

        napi_complete(napi);

        /* Reenable Rx interrupts */
        if (mdp->irq_enabled)
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
        return budget - quota;
}
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct phy_device *phydev = mdp->phydev;
        int new_state = 0;

        if (phydev->link) {
                if (phydev->duplex != mdp->duplex) {
                        new_state = 1;
                        mdp->duplex = phydev->duplex;
                        if (mdp->cd->set_duplex)
                                mdp->cd->set_duplex(ndev);
                }

                if (phydev->speed != mdp->speed) {
                        new_state = 1;
                        mdp->speed = phydev->speed;
                        if (mdp->cd->set_rate)
                                mdp->cd->set_rate(ndev);
                }
                if (!mdp->link) {
                        sh_eth_write(ndev,
                                     sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
                                     ECMR);
                        new_state = 1;
                        mdp->link = phydev->link;
                        if (mdp->cd->no_psr || mdp->no_ether_link)
                                sh_eth_rcv_snd_enable(ndev);
                }
        } else if (mdp->link) {
                new_state = 1;
                mdp->link = 0;
                mdp->speed = 0;
                mdp->duplex = -1;
                if (mdp->cd->no_psr || mdp->no_ether_link)
                        sh_eth_rcv_snd_disable(ndev);
        }

        if (new_state && netif_msg_link(mdp))
                phy_print_status(phydev);
}
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
        struct device_node *np = ndev->dev.parent->of_node;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct phy_device *phydev = NULL;

        mdp->link = 0;
        mdp->speed = 0;
        mdp->duplex = -1;

        /* Try connect to PHY */
        if (np) {
                struct device_node *pn;

                pn = of_parse_phandle(np, "phy-handle", 0);
                phydev = of_phy_connect(ndev, pn,
                                        sh_eth_adjust_link, 0,
                                        mdp->phy_interface);
                if (!phydev)
                        phydev = ERR_PTR(-ENOENT);
        } else {
                char phy_id[MII_BUS_ID_SIZE + 3];

                snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                         mdp->mii_bus->id, mdp->phy_id);

                phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
                                     mdp->phy_interface);
        }

        if (IS_ERR(phydev)) {
                netdev_err(ndev, "failed to connect PHY\n");
                return PTR_ERR(phydev);
        }

        netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
                    phydev->addr, phydev->irq, phydev->drv->name);

        mdp->phydev = phydev;

        return 0;
}
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret;

        ret = sh_eth_phy_init(ndev);
        if (ret)
                return ret;

        phy_start(mdp->phydev);

        return 0;
}
static int sh_eth_get_settings(struct net_device *ndev,
                               struct ethtool_cmd *ecmd)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        unsigned long flags;
        int ret;

        if (!mdp->phydev)
                return -ENODEV;

        spin_lock_irqsave(&mdp->lock, flags);
        ret = phy_ethtool_gset(mdp->phydev, ecmd);
        spin_unlock_irqrestore(&mdp->lock, flags);

        return ret;
}
static int sh_eth_set_settings(struct net_device *ndev,
                               struct ethtool_cmd *ecmd)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        unsigned long flags;
        int ret;

        if (!mdp->phydev)
                return -ENODEV;

        spin_lock_irqsave(&mdp->lock, flags);

        /* disable tx and rx */
        sh_eth_rcv_snd_disable(ndev);

        ret = phy_ethtool_sset(mdp->phydev, ecmd);
        if (ret)
                goto error_exit;

        if (ecmd->duplex == DUPLEX_FULL)
                mdp->duplex = 1;
        else
                mdp->duplex = 0;

        if (mdp->cd->set_duplex)
                mdp->cd->set_duplex(ndev);

error_exit:
        mdelay(1);

        /* enable tx and rx */
        sh_eth_rcv_snd_enable(ndev);

        spin_unlock_irqrestore(&mdp->lock, flags);

        return ret;
}
static int sh_eth_nway_reset(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        unsigned long flags;
        int ret;

        if (!mdp->phydev)
                return -ENODEV;

        spin_lock_irqsave(&mdp->lock, flags);
        ret = phy_start_aneg(mdp->phydev);
        spin_unlock_irqrestore(&mdp->lock, flags);

        return ret;
}
static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        mdp->msg_enable = value;
}
static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
        "rx_current", "tx_current",
        "rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return SH_ETH_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}
static void sh_eth_get_ethtool_stats(struct net_device *ndev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i = 0;

        /* device-specific stats */
        data[i++] = mdp->cur_rx;
        data[i++] = mdp->cur_tx;
        data[i++] = mdp->dirty_rx;
        data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(data, *sh_eth_gstrings_stats,
                       sizeof(sh_eth_gstrings_stats));
                break;
        }
}
static void sh_eth_get_ringparam(struct net_device *ndev,
                                 struct ethtool_ringparam *ring)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        ring->rx_max_pending = RX_RING_MAX;
        ring->tx_max_pending = TX_RING_MAX;
        ring->rx_pending = mdp->num_rx_ring;
        ring->tx_pending = mdp->num_tx_ring;
}
static int sh_eth_set_ringparam(struct net_device *ndev,
                                struct ethtool_ringparam *ring)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret;

        if (ring->tx_pending > TX_RING_MAX ||
            ring->rx_pending > RX_RING_MAX ||
            ring->tx_pending < TX_RING_MIN ||
            ring->rx_pending < RX_RING_MIN)
                return -EINVAL;
        if (ring->rx_mini_pending || ring->rx_jumbo_pending)
                return -EINVAL;

        if (netif_running(ndev)) {
                netif_device_detach(ndev);
                netif_tx_disable(ndev);

                /* Serialise with the interrupt handler and NAPI, then
                 * disable interrupts. We have to clear the
                 * irq_enabled flag first to ensure that interrupts
                 * won't be re-enabled.
                 */
                mdp->irq_enabled = false;
                synchronize_irq(ndev->irq);
                napi_synchronize(&mdp->napi);
                sh_eth_write(ndev, 0x0000, EESIPR);

                sh_eth_dev_exit(ndev);

                /* Free all the skbuffs in the Rx queue. */
                sh_eth_ring_free(ndev);
                /* Free DMA buffer */
                sh_eth_free_dma_buffer(mdp);
        }

        /* Set new parameters */
        mdp->num_rx_ring = ring->rx_pending;
        mdp->num_tx_ring = ring->tx_pending;

        if (netif_running(ndev)) {
                ret = sh_eth_ring_init(ndev);
                if (ret < 0) {
                        netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
                                   __func__);
                        return ret;
                }
                ret = sh_eth_dev_init(ndev, false);
                if (ret < 0) {
                        netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
                                   __func__);
                        return ret;
                }

                mdp->irq_enabled = true;
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
                /* Setting the Rx mode will start the Rx process. */
                sh_eth_write(ndev, EDRRR_R, EDRRR);
                netif_device_attach(ndev);
        }

        return 0;
}
static const struct ethtool_ops sh_eth_ethtool_ops = {
        .get_settings   = sh_eth_get_settings,
        .set_settings   = sh_eth_set_settings,
        .nway_reset     = sh_eth_nway_reset,
        .get_msglevel   = sh_eth_get_msglevel,
        .set_msglevel   = sh_eth_set_msglevel,
        .get_link       = ethtool_op_get_link,
        .get_strings    = sh_eth_get_strings,
        .get_ethtool_stats  = sh_eth_get_ethtool_stats,
        .get_sset_count     = sh_eth_get_sset_count,
        .get_ringparam  = sh_eth_get_ringparam,
        .set_ringparam  = sh_eth_set_ringparam,
};
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
        int ret = 0;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        pm_runtime_get_sync(&mdp->pdev->dev);

        napi_enable(&mdp->napi);

        ret = request_irq(ndev->irq, sh_eth_interrupt,
                          mdp->cd->irq_flags, ndev->name, ndev);
        if (ret) {
                netdev_err(ndev, "Can not assign IRQ number\n");
                goto out_napi_off;
        }

        /* Descriptor set */
        ret = sh_eth_ring_init(ndev);
        if (ret)
                goto out_free_irq;

        /* device init */
        ret = sh_eth_dev_init(ndev, true);
        if (ret)
                goto out_free_irq;

        /* PHY control start*/
        ret = sh_eth_phy_start(ndev);
        if (ret)
                goto out_free_irq;

        mdp->is_opened = 1;

        return ret;

out_free_irq:
        free_irq(ndev->irq, ndev);
out_napi_off:
        napi_disable(&mdp->napi);
        pm_runtime_put_sync(&mdp->pdev->dev);
        return ret;
}
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_rxdesc *rxdesc;
        int i;

        netif_stop_queue(ndev);

        netif_err(mdp, timer, ndev,
                  "transmit timed out, status %8.8x, resetting...\n",
                  sh_eth_read(ndev, EESR));

        /* tx_errors count up */
        ndev->stats.tx_errors++;

        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < mdp->num_rx_ring; i++) {
                rxdesc = &mdp->rx_ring[i];
                rxdesc->status = 0;
                rxdesc->addr = 0xBADF00D0;
                dev_kfree_skb(mdp->rx_skbuff[i]);
                mdp->rx_skbuff[i] = NULL;
        }
        for (i = 0; i < mdp->num_tx_ring; i++) {
                dev_kfree_skb(mdp->tx_skbuff[i]);
                mdp->tx_skbuff[i] = NULL;
        }

        /* device init */
        sh_eth_dev_init(ndev, true);
}
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_txdesc *txdesc;
        u32 entry;
        unsigned long flags;

        spin_lock_irqsave(&mdp->lock, flags);
        if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
                if (!sh_eth_txfree(ndev)) {
                        netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
                        netif_stop_queue(ndev);
                        spin_unlock_irqrestore(&mdp->lock, flags);
                        return NETDEV_TX_BUSY;
                }
        }
        spin_unlock_irqrestore(&mdp->lock, flags);

        if (skb_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;

        entry = mdp->cur_tx % mdp->num_tx_ring;
        mdp->tx_skbuff[entry] = skb;
        txdesc = &mdp->tx_ring[entry];
        /* soft swap. */
        if (!mdp->cd->hw_swap)
                sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
                                 skb->len + 2);
        txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
                                      DMA_TO_DEVICE);
        if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
        txdesc->buffer_length = skb->len;

        if (entry >= mdp->num_tx_ring - 1)
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
        else
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

        mdp->cur_tx++;

        if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
                sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

        return NETDEV_TX_OK;
}
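
/* Writing the TRNS bit to EDTRR above kicks the Tx DMA engine; it is only
 * rewritten when the engine has stopped (the bit reads back zero), since
 * the EDMAC keeps fetching descriptors on its own while the bit is set.
 */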
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        if (sh_eth_is_rz_fast_ether(mdp))
                return &ndev->stats;

        if (!mdp->is_opened)
                return &ndev->stats;

        ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
        sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
        ndev->stats.collisions += sh_eth_read(ndev, CDCR);
        sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
        ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
        sh_eth_write(ndev, 0, LCCR);    /* (write clear) */

        if (sh_eth_is_gether(mdp)) {
                ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
                sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
                ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
                sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
        } else {
                ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
                sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
        }

        return &ndev->stats;
}
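
/* TROCR/CDCR/LCCR (and CERCR/CEECR or CNDCR) are hardware counters that the
 * driver clears with a zero write after each read, so their counts are
 * accumulated into net_device_stats rather than read directly.
 */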
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        netif_stop_queue(ndev);

        /* Serialise with the interrupt handler and NAPI, then disable
         * interrupts. We have to clear the irq_enabled flag first to
         * ensure that interrupts won't be re-enabled.
         */
        mdp->irq_enabled = false;
        synchronize_irq(ndev->irq);
        napi_disable(&mdp->napi);
        sh_eth_write(ndev, 0x0000, EESIPR);

        sh_eth_dev_exit(ndev);

        /* PHY Disconnect */
        if (mdp->phydev) {
                phy_stop(mdp->phydev);
                phy_disconnect(mdp->phydev);
                mdp->phydev = NULL;
        }

        free_irq(ndev->irq, ndev);

        /* Free all the skbuffs in the Rx queue. */
        sh_eth_ring_free(ndev);

        /* free DMA buffer */
        sh_eth_free_dma_buffer(mdp);

        pm_runtime_put_sync(&mdp->pdev->dev);

        mdp->is_opened = 0;

        return 0;
}
/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct phy_device *phydev = mdp->phydev;

        if (!netif_running(ndev))
                return -EINVAL;

        if (!phydev)
                return -ENODEV;

        return phy_mii_ioctl(phydev, rq, cmd);
}
/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
                                            int entry)
{
        return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}

static u32 sh_eth_tsu_get_post_mask(int entry)
{
        return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
        return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}
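
/* Each TSU_POSTn register packs eight 4-bit fields, one per CAM entry:
 * entry / 8 selects the register and (entry % 8) * 4 selects the field.
 * Within a field, the per-port pattern 0x08 >> (port << 1) marks reception
 * of frames matching that CAM entry on the given port.
 */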
static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
                                             int entry)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 tmp;
        void *reg_offset;

        reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
        tmp = ioread32(reg_offset);
        iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
                                              int entry)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 post_mask, ref_mask, tmp;
        void *reg_offset;

        reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
        post_mask = sh_eth_tsu_get_post_mask(entry);
        ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

        tmp = ioread32(reg_offset);
        iowrite32(tmp & ~post_mask, reg_offset);

        /* If the other port still enables this entry, return "true" */
        return tmp & ref_mask;
}
static int sh_eth_tsu_busy(struct net_device *ndev)
{
        int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
                udelay(10);
                timeout--;
                if (timeout <= 0) {
                        netdev_err(ndev, "%s: timeout\n", __func__);
                        return -ETIMEDOUT;
                }
        }

        return 0;
}
static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
                                  const u8 *addr)
{
        u32 val;

        val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
        iowrite32(val, reg);
        if (sh_eth_tsu_busy(ndev) < 0)
                return -EBUSY;

        val = addr[4] << 8 | addr[5];
        iowrite32(val, reg + 4);
        if (sh_eth_tsu_busy(ndev) < 0)
                return -EBUSY;

        return 0;
}
static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
        u32 val;

        val = ioread32(reg);
        addr[0] = (val >> 24) & 0xff;
        addr[1] = (val >> 16) & 0xff;
        addr[2] = (val >> 8) & 0xff;
        addr[3] = val & 0xff;
        val = ioread32(reg + 4);
        addr[4] = (val >> 8) & 0xff;
        addr[5] = val & 0xff;
}
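
/* Each CAM slot occupies 8 bytes of TSU address space: the first four MAC
 * bytes live in TSU_ADRHn and the last two in TSU_ADRLn, which is why the
 * entry walkers below step reg_offset by 8 per entry.
 */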
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
        int i;
        u8 c_addr[ETH_ALEN];

        for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
                sh_eth_tsu_read_entry(reg_offset, c_addr);
                if (ether_addr_equal(addr, c_addr))
                        return i;
        }

        return -ENOENT;
}
static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
        u8 blank[ETH_ALEN];
        int entry;

        memset(blank, 0, sizeof(blank));
        entry = sh_eth_tsu_find_entry(ndev, blank);
        return (entry < 0) ? -ENOMEM : entry;
}
static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
                                              int entry)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
        int ret;
        u8 blank[ETH_ALEN];

        sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
                         ~(1 << (31 - entry)), TSU_TEN);

        memset(blank, 0, sizeof(blank));
        ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
        if (ret < 0)
                return ret;
        return 0;
}
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
        int i, ret;

        if (!mdp->cd->tsu)
                return 0;

        i = sh_eth_tsu_find_entry(ndev, addr);
        if (i < 0) {
                /* No entry found, create one */
                i = sh_eth_tsu_find_empty(ndev);
                if (i < 0)
                        return -ENOMEM;
                ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
                if (ret < 0)
                        return ret;

                /* Enable the entry */
                sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
                                 (1 << (31 - i)), TSU_TEN);
        }

        /* Entry found or created, enable POST */
        sh_eth_tsu_enable_cam_entry_post(ndev, i);

        return 0;
}
static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i, ret = 0;

        if (!mdp->cd->tsu)
                goto done;

        i = sh_eth_tsu_find_entry(ndev, addr);
        if (i < 0)
                goto done;

        if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
                goto done;

        /* Disable the entry if both ports were disabled */
        ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
        if (ret < 0)
                goto done;

done:
        return ret;
}
static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i, ret;

        if (unlikely(!mdp->cd->tsu))
                return 0;

        for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
                if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
                        continue;

                /* Disable the entry if both ports were disabled */
                ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u8 addr[ETH_ALEN];
        void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
        int i;

        if (unlikely(!mdp->cd->tsu))
                return;

        for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
                sh_eth_tsu_read_entry(reg_offset, addr);
                if (is_multicast_ether_addr(addr))
                        sh_eth_tsu_del_entry(ndev, addr);
        }
}
/* Update promiscuous flag and multicast filter */
static void sh_eth_set_rx_mode(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ecmr_bits;
        int mcast_all = 0;
        unsigned long flags;

        spin_lock_irqsave(&mdp->lock, flags);
        /* Initial condition is MCT = 1, PRM = 0.
         * Depending on ndev->flags, set PRM or clear MCT
         */
        ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
        if (mdp->cd->tsu)
                ecmr_bits |= ECMR_MCT;

        if (!(ndev->flags & IFF_MULTICAST)) {
                sh_eth_tsu_purge_mcast(ndev);
                mcast_all = 1;
        }
        if (ndev->flags & IFF_ALLMULTI) {
                sh_eth_tsu_purge_mcast(ndev);
                ecmr_bits &= ~ECMR_MCT;
                mcast_all = 1;
        }

        if (ndev->flags & IFF_PROMISC) {
                sh_eth_tsu_purge_all(ndev);
                ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
        } else if (mdp->cd->tsu) {
                struct netdev_hw_addr *ha;
                netdev_for_each_mc_addr(ha, ndev) {
                        if (mcast_all && is_multicast_ether_addr(ha->addr))
                                continue;

                        if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
                                if (!mcast_all) {
                                        sh_eth_tsu_purge_mcast(ndev);
                                        ecmr_bits &= ~ECMR_MCT;
                                        mcast_all = 1;
                                }
                        }
                }
        }

        /* update the ethernet mode */
        sh_eth_write(ndev, ecmr_bits, ECMR);

        spin_unlock_irqrestore(&mdp->lock, flags);
}
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
        if (!mdp->port)
                return TSU_VTAG0;
        else
                return TSU_VTAG1;
}
static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
                                  __be16 proto, u16 vid)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int vtag_reg_index = sh_eth_get_vtag_index(mdp);

        if (unlikely(!mdp->cd->tsu))
                return -EPERM;

        /* No filtering if vid = 0 */
        if (!vid)
                return 0;

        mdp->vlan_num_ids++;

        /* The controller has one VLAN tag HW filter. So, if the filter is
         * already enabled, the driver disables it and stops filtering.
         */
        if (mdp->vlan_num_ids > 1) {
                /* disable VLAN filter */
                sh_eth_tsu_write(mdp, 0, vtag_reg_index);
                return 0;
        }

        sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
                         vtag_reg_index);

        return 0;
}
static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
                                   __be16 proto, u16 vid)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int vtag_reg_index = sh_eth_get_vtag_index(mdp);

        if (unlikely(!mdp->cd->tsu))
                return -EPERM;

        /* No filtering if vid = 0 */
        if (!vid)
                return 0;

        mdp->vlan_num_ids--;
        sh_eth_tsu_write(mdp, 0, vtag_reg_index);

        return 0;
}
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
        if (sh_eth_is_rz_fast_ether(mdp)) {
                sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
                return;
        }

        sh_eth_tsu_write(mdp, 0, TSU_FWEN0);    /* Disable forward(0->1) */
        sh_eth_tsu_write(mdp, 0, TSU_FWEN1);    /* Disable forward(1->0) */
        sh_eth_tsu_write(mdp, 0, TSU_FCM);      /* forward fifo 3k-3k */
        sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
        sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
        sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
        sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
        sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
        sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
        sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
        if (sh_eth_is_gether(mdp)) {
                sh_eth_tsu_write(mdp, 0, TSU_QTAG0);  /* Disable QTAG(0->1) */
                sh_eth_tsu_write(mdp, 0, TSU_QTAG1);  /* Disable QTAG(1->0) */
        } else {
                sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
                sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
        }
        sh_eth_tsu_write(mdp, 0, TSU_FWSR);     /* all interrupt status clear */
        sh_eth_tsu_write(mdp, 0, TSU_FWINMK);   /* Disable all interrupt */
        sh_eth_tsu_write(mdp, 0, TSU_TEN);      /* Disable all CAM entry */
        sh_eth_tsu_write(mdp, 0, TSU_POST1);    /* Disable CAM entry [ 0- 7] */
        sh_eth_tsu_write(mdp, 0, TSU_POST2);    /* Disable CAM entry [ 8-15] */
        sh_eth_tsu_write(mdp, 0, TSU_POST3);    /* Disable CAM entry [16-23] */
        sh_eth_tsu_write(mdp, 0, TSU_POST4);    /* Disable CAM entry [24-31] */
}
/* MDIO bus release function */
static int sh_mdio_release(struct sh_eth_private *mdp)
{
        /* unregister mdio bus */
        mdiobus_unregister(mdp->mii_bus);

        /* free bitbang info */
        free_mdio_bitbang(mdp->mii_bus);

        return 0;
}
/* MDIO bus init function */
static int sh_mdio_init(struct sh_eth_private *mdp,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct platform_device *pdev = mdp->pdev;
	struct device *dev = &mdp->pdev->dev;

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang)
		return -ENOMEM;

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = PIR_MDI;
	bitbang->mdo_msk = PIR_MDO;
	bitbang->mmd_msk = PIR_MMD;
	bitbang->mdc_msk = PIR_MDC;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus)
		return -ENOMEM;

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* PHY IRQ */
	mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
					       GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	/* register MDIO bus */
	if (dev->of_node) {
		ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
	} else {
		for (i = 0; i < PHY_MAX_ADDR; i++)
			mdp->mii_bus->irq[i] = PHY_POLL;
		if (pd->phy_irq > 0)
			mdp->mii_bus->irq[pd->phy] = pd->phy_irq;

		ret = mdiobus_register(mdp->mii_bus);
	}

	if (ret)
		goto out_free_bus;

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);
	return ret;
}
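/* Background sketch: the generic mdio-bitbang layer drives the MDIO lines
 * through the bb_ops callbacks, which read-modify-write the PIR register
 * using the masks configured above. A hypothetical MDC toggle callback
 * (the name below is made up for illustration; the real callbacks live
 * earlier in this file) would look roughly like:
 *
 *   static void example_set_mdc(struct mdiobb_ctrl *ctrl, int level)
 *   {
 *       struct bb_info *bb = container_of(ctrl, struct bb_info, ctrl);
 *       u32 pir = ioread32(bb->addr);
 *
 *       if (level)
 *           pir |= bb->mdc_msk;
 *       else
 *           pir &= ~bb->mdc_msk;
 *       iowrite32(pir, bb->addr);
 *   }
 */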
static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RZ:
		reg_offset = sh_eth_offset_fast_rz;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		break;
	}

	return reg_offset;
}
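/* Note: the returned table is stored in mdp->reg_offset and translates the
 * generic register enum into the per-SoC MMIO offset, i.e. accesses resolve
 * to mdp->addr + mdp->reg_offset[REG] (see, for example, how bitbang->addr
 * is computed from PIR in sh_mdio_init() above).
 */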
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
static const struct net_device_ops sh_eth_netdev_ops_tsu = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
#ifdef CONFIG_OF
static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct sh_eth_plat_data *pdata;
	const char *mac_addr;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->phy_interface = of_get_phy_mode(np);

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);

	pdata->no_ether_link =
		of_property_read_bool(np, "renesas,no-ether-link");
	pdata->ether_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	return pdata;
}
static const struct of_device_id sh_eth_match_table[] = {
	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
	{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_eth_match_table);
#else
static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	return NULL;
}
#endif
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);
	int ret, devno;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	ndev->base_addr = res->start;

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;

	if (pdev->dev.of_node)
		pd = sh_eth_parse_dt(&pdev->dev);
	if (!pd) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_release;
	}

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* set cpu data */
	if (id) {
		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	} else {
		const struct of_device_id *match;

		match = of_match_device(of_match_ptr(sh_eth_match_table),
					&pdev->dev);
		mdp->cd = (struct sh_eth_cpu_data *)match->data;
	}
	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
	if (!mdp->reg_offset) {
		dev_err(&pdev->dev, "Unknown register type (%d)\n",
			mdp->cd->register_type);
		ret = -EINVAL;
		goto out_release;
	}
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	ndev->ethtool_ops = &sh_eth_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;

		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* MDIO bus init */
	ret = sh_mdio_init(mdp, pd);
	if (ret) {
		dev_err(&ndev->dev, "failed to initialise MDIO\n");
		goto out_release;
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* print device information */
	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, ndev);

	return ret;

out_napi_del:
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}
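/* Illustrative legacy (non-DT) board registration, a sketch under assumed
 * resource definitions (eth_resources, the device name choice, and all
 * values below are made up for illustration):
 *
 *   static struct sh_eth_plat_data eth_pdata = {
 *       .phy = 0x1f,
 *       .phy_interface = PHY_INTERFACE_MODE_MII,
 *       .edmac_endian = EDMAC_LITTLE_ENDIAN,
 *   };
 *
 *   static struct platform_device eth_device = {
 *       .name = "sh7724-ether",   // matched against sh_eth_id_table below
 *       .id = 0,
 *       .dev = { .platform_data = &eth_pdata },
 *       .resource = eth_resources,
 *       .num_resources = ARRAY_SIZE(eth_resources),
 *   };
 */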
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int sh_eth_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		ret = sh_eth_close(ndev);
	}

	return ret;
}

static int sh_eth_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		ret = sh_eth_open(ndev);
		if (ret < 0)
			return ret;
		netif_device_attach(ndev);
	}

	return ret;
}
#endif
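/* Note: system sleep reuses the normal open/close paths, so for example
 * "echo mem > /sys/power/state" while the interface is up ends in
 * sh_eth_close() on suspend and sh_eth_open() plus netif_device_attach()
 * on resume; no separate register save/restore is required.
 */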
static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
};

#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif
static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7793-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7794-ether", (kernel_ulong_t)&r8a779x_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		   .name = CARDNAME,
		   .pm = SH_ETH_PM_OPS,
		   .of_match_table = of_match_ptr(sh_eth_match_table),
	},
};

module_platform_driver(sh_eth_driver);
3105 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
3106 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3107 MODULE_LICENSE("GPL v2");