/*  SuperH Ethernet device driver
 *
 *  Copyright (C) 2014 Renesas Electronics Corporation
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
 *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
 *  Copyright (C) 2014 Codethink Limited
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		 NETIF_MSG_TIMER | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	/* ... */
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,
};

static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
	/* ... */
	[TSU_CTRST]	= 0x0004,
	[TSU_VTAG0]	= 0x0058,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	/* ... */
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	/* ... */
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	/* ... */
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRL31]	= 0x01fc,
};
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);

static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_gigabit;
}

static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_fast_rz;
}
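
/* Program the RMII_MII register so that the E-MAC pins match the PHY
 * interface mode (GMII/MII/RMII) reported by the platform.
 */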
static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev,
			    "PHY interface mode was not setup. Set to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex)	/* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else			/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
/* There is CPU dependent code */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	default:
		break;
	}
}

static struct sh_eth_cpu_data r8a777x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.fdr_value	= 0x00000f0f,
};

static struct sh_eth_cpu_data r8a779x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.fdr_value	= 0x00000f0f,

	.trscer_err_mask = DESC_I_RINT8,
};
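
/* Each SoC variant below supplies a set_rate() hook; it is installed via
 * struct sh_eth_cpu_data and called from sh_eth_adjust_link() whenever the
 * PHY reports a new link speed.
 */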
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

static struct sh_eth_cpu_data sh7724_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags	= IRQF_SHARED,
	.rpadir_value	= 2 << 16,
};
#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	u32 mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000072f,

	.irq_flags	= IRQF_SHARED,
	.rpadir_value	= 2 << 16,
};
static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

static struct sh_eth_cpu_data sh7734_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
};

static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags	= IRQF_SHARED,
};
static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}

static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.rpadir_value	= 2 << 16,
};

static struct sh_eth_cpu_data r7s72100_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,

	.register_type	= SH_ETH_REG_FAST_RZ,

	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= 0xff7f009f,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.rpadir_value	= 2 << 16,
};

static struct sh_eth_cpu_data sh7619_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};

static struct sh_eth_cpu_data sh771x_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
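
/* Fill in any sh_eth_cpu_data fields that the per-SoC tables above left
 * at zero with the common defaults.
 */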
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->trscer_err_mask)
		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}
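
/* Poll EDMR until the software reset bits self-clear; report -ETIMEDOUT
 * if the controller never comes out of reset.
 */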
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		netdev_err(ndev, "Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}

static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
			     EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			return ret;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		if (mdp->cd->hw_crc)
			sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
			     EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
			     EDMR);
	}

	return ret;
}
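
/* Reserve skb headroom so that the buffer handed to the DMA engine
 * starts on an SH_ETH_RX_ALIGN boundary.
 */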
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/* Get MAC address from SuperH MAC address register
 *
 * SuperH's Ethernet device doesn't have ROM for the MAC address.
 * This driver gets the MAC address that was used by the bootloader
 * (U-Boot or sh-ipl+g). If you want to use this device, you must set
 * the MAC address in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}
static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}
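
/* State for the bit-banged MDIO bus: the PIR register address, the bit
 * masks for the MDC, MDIO data and direction (MMD) pins, and an optional
 * gate hook for boards that multiplex access to the register.
 */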
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};
/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner		= THIS_MODULE,
	.set_mdc	= sh_mdc_ctrl,
	.set_mdio_dir	= sh_mmd_ctrl,
	.set_mdio_data	= sh_set_mdio,
	.get_mdio_data	= sh_get_mdio,
};
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++)
			dev_kfree_skb(mdp->rx_skbuff[i]);
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++)
			dev_kfree_skb(mdp->tx_skbuff[i]);
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
	dma_addr_t dma_addr;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, skbuff_size);
		if (skb == NULL)
			break;
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		/* The size of the buffer is a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		dma_addr = dma_map_single(&ndev->dev, skb->data,
					  rxdesc->buffer_length,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, dma_addr)) {
			kfree_skb(skb);
			break;
		}
		mdp->rx_skbuff[i] = skb;
		rxdesc->addr = dma_addr;
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}
static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}
static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		return ret;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control (enable multiple-packets per rx irq) */
	sh_eth_write(ndev, RMCR_RNC, RMCR);

	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst sycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start) {
		mdp->irq_enabled = true;
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
	}

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	if (start) {
		/* E-MAC Interrupt Enable register */
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

		/* Set MAC address */
		update_mac_address(ndev);

		/* mask reset */
		if (mdp->cd->apr)
			sh_eth_write(ndev, APR_AP, APR);
		if (mdp->cd->mpr)
			sh_eth_write(ndev, MPR_MP, MPR);
		if (mdp->cd->tpauser)
			sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

	return ret;
}
static void sh_eth_dev_exit(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Deactivate all TX descriptors, so DMA should stop at next
	 * packet boundary if it's currently running
	 */
	for (i = 0; i < mdp->num_tx_ring; i++)
		mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);

	/* Disable TX FIFO egress to MAC */
	sh_eth_rcv_snd_disable(ndev);

	/* Stop RX DMA at next packet boundary */
	sh_eth_write(ndev, 0, EDRRR);

	/* Aside from TX DMA, we can't tell when the hardware is
	 * really stopped, so we need to reset to make sure.
	 * Before doing that, wait for long enough to *probably*
	 * finish transmitting the last packet and poll stats.
	 */
	msleep(2); /* max frame time at 10 Mbps < 1250 us */
	sh_eth_get_stats(ndev);
	sh_eth_reset(ndev);

	/* Set MAC address again */
	update_mac_address(ndev);
}
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* TACT bit must be checked before all the following reads */
		rmb();
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += txdesc->buffer_length;
	}
	return free_num;
}
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	int limit;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
	dma_addr_t dma_addr;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		/* RACT bit must be checked before all the following reads */
		rmb();
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/* In case of almost all GETHER/ETHERs, the Receive Frame State
		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
		 * bit 0. However, in case of the R8A7740 and R7S72100
		 * the RFS bits are from bit 25 to bit 16. So, the
		 * driver needs right shifting by 16.
		 */
		if (mdp->cd->shift_rd0)
			desc_status >>= 16;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			dma_unmap_single(&ndev->dev, rxdesc->addr,
					 ALIGN(mdp->rx_buf_sz, 16),
					 DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_receive_skb(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
		}
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is 16 byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, skbuff_size);
			if (skb == NULL)
				break;	/* Better luck next round. */
			sh_eth_set_receive_align(skb);
			dma_addr = dma_map_single(&ndev->dev, skb->data,
						  rxdesc->buffer_length,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(&ndev->dev, dma_addr)) {
				kfree_skb(skb);
				break;
			}
			mdp->rx_skbuff[entry] = skb;

			skb_checksum_none_assert(skb);
			rxdesc->addr = dma_addr;
		}
		wmb(); /* RACT bit must be set after all the above writes */
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
			u32 count = (sh_eth_read(ndev, RDFAR) -
				     sh_eth_read(ndev, RDLAR)) >> 4;

			mdp->cur_rx = count;
			mdp->dirty_rx = count;
		}
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	*quota -= limit - boguscnt - 1;

	return *quota <= 0;
}
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		(ECMR_RE | ECMR_TE), ECMR);
}
/* error control function */
static void sh_eth_error(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				goto ignore_link;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				sh_eth_rcv_snd_disable(ndev);
			} else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
						   ~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					     ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
						   DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

ignore_link:
	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		/* dmesg */
		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			   intr_status, mdp->cur_tx, mdp->dirty_tx,
			   (u32)ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
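
/* Top-level interrupt handler: reads EESR once, hands Rx work to NAPI,
 * frees completed Tx descriptors, and routes everything else to
 * sh_eth_error().
 */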
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status, intr_enable;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing ECI interrupt to be always
	 * enabled since it's the one that comes thru regardless of the mask,
	 * and we need to fully handle it in sh_eth_error() in order to quench
	 * it as it doesn't get cleared by just writing 1 to the ECI bit...
	 */
	intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | DMAC_M_ECI;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto out;

	if (unlikely(!mdp->irq_enabled)) {
		sh_eth_write(ndev, 0, EESIPR);
		goto out;
	}

	if (intr_status & EESR_RX_CHECK) {
		if (napi_schedule_prep(&mdp->napi)) {
			/* Mask Rx interrupts */
			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
				     EESIPR);
			__napi_schedule(&mdp->napi);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
				    intr_status, intr_enable);
		}
	}

	/* Tx interrupt */
	if (intr_status & cd->tx_check) {
		/* Clear Tx interrupts */
		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check) {
		/* Clear error interrupts */
		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

		sh_eth_error(ndev, intr_status);
	}

out:
	spin_unlock(&mdp->lock);

	return ret;
}
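
/* NAPI poll handler: drains Rx while EESR keeps asserting EESR_RX_CHECK
 * or until the budget runs out, then re-enables Rx interrupts.
 */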
static int sh_eth_poll(struct napi_struct *napi, int budget)
{
	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
						  napi);
	struct net_device *ndev = napi->dev;
	int quota = budget;
	u32 intr_status;

	for (;;) {
		intr_status = sh_eth_read(ndev, EESR);
		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Clear Rx interrupts */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;
	}

	napi_complete(napi);

	/* Reenable Rx interrupts */
	if (mdp->irq_enabled)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
	return budget - quota;
}
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (!mdp->link) {
			sh_eth_write(ndev,
				     sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
				     ECMR);
			new_state = 1;
			mdp->link = phydev->link;
			if (mdp->cd->no_psr || mdp->no_ether_link)
				sh_eth_rcv_snd_enable(ndev);
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = 0;
		mdp->speed = 0;
		mdp->duplex = -1;
		if (mdp->cd->no_psr || mdp->no_ether_link)
			sh_eth_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;

	mdp->link = 0;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	if (np) {
		struct device_node *pn;

		pn = of_parse_phandle(np, "phy-handle", 0);
		phydev = of_phy_connect(ndev, pn,
					sh_eth_adjust_link, 0,
					mdp->phy_interface);

		if (!phydev)
			phydev = ERR_PTR(-ENOENT);
	} else {
		char phy_id[MII_BUS_ID_SIZE + 3];

		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 mdp->mii_bus->id, mdp->phy_id);

		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
				     mdp->phy_interface);
	}

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "failed to connect PHY\n");
		return PTR_ERR(phydev);
	}

	netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
		    phydev->addr, phydev->irq, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	phy_start(mdp->phydev);

	return 0;
}
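
/* ethtool operations: link settings are delegated to phylib, under the
 * driver spinlock.
 */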
static int sh_eth_get_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	if (!mdp->phydev)
		return -ENODEV;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	if (!mdp->phydev)
		return -ENODEV;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	if (!mdp->phydev)
		return -ENODEV;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
		       sizeof(sh_eth_gstrings_stats));
		break;
	}
}
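
/* Report and resize the Rx/Tx descriptor rings; resizing a running
 * interface requires a full stop/reinit cycle below.
 */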
static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}

static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		netif_tx_disable(ndev);

		/* Serialise with the interrupt handler and NAPI, then
		 * disable interrupts.  We have to clear the
		 * irq_enabled flag first to ensure that interrupts
		 * won't be re-enabled.
		 */
		mdp->irq_enabled = false;
		synchronize_irq(ndev->irq);
		napi_synchronize(&mdp->napi);
		sh_eth_write(ndev, 0x0000, EESIPR);

		sh_eth_dev_exit(ndev);

		/* Free all the skbuffs in the Rx queue. */
		sh_eth_ring_free(ndev);
		/* Free DMA buffer */
		sh_eth_free_dma_buffer(mdp);
	}

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	if (netif_running(ndev)) {
		ret = sh_eth_ring_init(ndev);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
				   __func__);
			return ret;
		}
		ret = sh_eth_dev_init(ndev, false);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
				   __func__);
			return ret;
		}

		mdp->irq_enabled = true;
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_device_attach(ndev);
	}

	return 0;
}
static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings	= sh_eth_get_settings,
	.set_settings	= sh_eth_set_settings,
	.nway_reset	= sh_eth_nway_reset,
	.get_msglevel	= sh_eth_get_msglevel,
	.set_msglevel	= sh_eth_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_strings	= sh_eth_get_strings,
	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
	.get_sset_count     = sh_eth_get_sset_count,
	.get_ringparam	= sh_eth_get_ringparam,
	.set_ringparam	= sh_eth_set_ringparam,
};
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	napi_enable(&mdp->napi);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
			  mdp->cd->irq_flags, ndev->name, ndev);
	if (ret) {
		netdev_err(ndev, "Can not assign IRQ number\n");
		goto out_napi_off;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev, true);
	if (ret)
		goto out_free_irq;

	/* PHY control start*/
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	mdp->is_opened = 1;

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&mdp->napi);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	netif_err(mdp, timer, ndev,
		  "transmit timed out, status %8.8x, resetting...\n",
		  sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev, true);
}
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_txfree(ndev)) {
			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	txdesc->buffer_length = skb->len;

	wmb(); /* TACT bit must be set after all the above writes */
	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (sh_eth_is_rz_fast_ether(mdp))
		return &ndev->stats;

	if (!mdp->is_opened)
		return &ndev->stats;

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */

	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}

	return &ndev->stats;
}
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Serialise with the interrupt handler and NAPI, then disable
	 * interrupts.  We have to clear the irq_enabled flag first to
	 * ensure that interrupts won't be re-enabled.
	 */
	mdp->irq_enabled = false;
	synchronize_irq(ndev->irq);
	napi_disable(&mdp->napi);
	sh_eth_write(ndev, 0x0000, EESIPR);

	sh_eth_dev_exit(ndev);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	pm_runtime_put_sync(&mdp->pdev->dev);

	mdp->is_opened = 0;

	return 0;
}

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
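
/* TSU (Table look-up and Switching Unit) helpers: the CAM table holds up
 * to 32 MAC addresses shared by the two ports, and each TSU_POSTn
 * register holds a 4-bit field of per-port enable bits per entry.
 */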
/* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
{
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}

static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}

static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* If other port enables, the function returns "true" */
	return tmp & ref_mask;
}
static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			netdev_err(ndev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}

static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (ether_addr_equal(addr, c_addr))
			return i;
	}

	return -ENOENT;
}

static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}

static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;
	return 0;
}
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}

static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0 || sh_eth_tsu_disable_cam_entry_post(ndev, i))
		goto done;

	/* Disable the entry if both ports were disabled */
	ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
	if (ret < 0)
		return ret;

done:
	return 0;
}

static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (unlikely(!mdp->cd->tsu))
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (unlikely(!mdp->cd->tsu))
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}
/* Update promiscuous flag and multicast filter */
static void sh_eth_set_rx_mode(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
	if (mdp->cd->tsu)
		ecmr_bits |= ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;
		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}

static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
				  __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/* The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled, the driver disables it and the filtering is left
	 * to the stack.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}

static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	if (sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
		return;
	}

	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}
2675 /* MDIO bus release function */
2676 static int sh_mdio_release(struct sh_eth_private
*mdp
)
2678 /* unregister mdio bus */
2679 mdiobus_unregister(mdp
->mii_bus
);
2681 /* free bitbang info */
2682 free_mdio_bitbang(mdp
->mii_bus
);
/* MDIO bus init function */
static int sh_mdio_init(struct sh_eth_private *mdp,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct platform_device *pdev = mdp->pdev;
	struct device *dev = &mdp->pdev->dev;

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang)
		return -ENOMEM;

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = PIR_MDI;
	bitbang->mdo_msk = PIR_MDO;
	bitbang->mmd_msk = PIR_MMD;
	bitbang->mdc_msk = PIR_MDC;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus)
		return -ENOMEM;

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* PHY IRQ */
	mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
					       GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	/* register MDIO bus */
	if (dev->of_node) {
		ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
	} else {
		for (i = 0; i < PHY_MAX_ADDR; i++)
			mdp->mii_bus->irq[i] = PHY_POLL;
		if (pd->phy_irq > 0)
			mdp->mii_bus->irq[pd->phy] = pd->phy_irq;

		ret = mdiobus_register(mdp->mii_bus);
	}

	if (ret)
		goto out_free_bus;

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);
	return ret;
}
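/* Example only (hypothetical board code, not from this file): the platform
 * data fields consumed by sh_mdio_init() above. A positive pd->phy_irq is
 * routed to the PHY at pd->phy; otherwise every address is left at PHY_POLL.
 *
 *	static struct sh_eth_plat_data example_eth_pd = {
 *		.phy		= 0x1f,
 *		.phy_interface	= PHY_INTERFACE_MODE_MII,
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *		.phy_irq	= 42,	// board-specific IRQ number
 *	};
 */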
static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RZ:
		reg_offset = sh_eth_offset_fast_rz;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		break;
	}

	return reg_offset;
}
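/* Access-pattern sketch (an assumption about the accessors, which live
 * elsewhere in the driver): a register enum value is translated through the
 * per-family offset table selected above before touching the I/O window,
 * roughly:
 *
 *	iowrite32(data, mdp->addr + mdp->reg_offset[ECMR]);
 */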
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
static const struct net_device_ops sh_eth_netdev_ops_tsu = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
#ifdef CONFIG_OF
static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct sh_eth_plat_data *pdata;
	const char *mac_addr;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->phy_interface = of_get_phy_mode(np);

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);

	pdata->no_ether_link =
		of_property_read_bool(np, "renesas,no-ether-link");
	pdata->ether_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	return pdata;
}
static const struct of_device_id sh_eth_match_table[] = {
	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
	{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_eth_match_table);
#else
static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	return NULL;
}
#endif
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	ndev->base_addr = res->start;

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;

	/* platform data comes either from the board or from DT */
	if (pdev->dev.of_node)
		pd = sh_eth_parse_dt(&pdev->dev);
	if (!pd) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_release;
	}

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* set cpu data */
	if (id) {
		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	} else {
		const struct of_device_id *match;

		match = of_match_device(of_match_ptr(sh_eth_match_table),
					&pdev->dev);
		mdp->cd = (struct sh_eth_cpu_data *)match->data;
	}
	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
	if (!mdp->reg_offset) {
		dev_err(&pdev->dev, "Unknown register type (%d)\n",
			mdp->cd->register_type);
		ret = -EINVAL;
		goto out_release;
	}
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	ndev->ethtool_ops = &sh_eth_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;
		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only)*/
			sh_eth_tsu_init(mdp);
		}
	}

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* MDIO bus init */
	ret = sh_mdio_init(mdp, pd);
	if (ret) {
		dev_err(&ndev->dev, "failed to initialise MDIO\n");
		goto out_release;
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* print device information */
	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, ndev);

	return ret;

out_napi_del:
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}
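/* Resource layout note (derived from sh_eth_drv_probe() above): MEM
 * resource 0 is the E-DMAC/E-MAC register block, and MEM resource 1, looked
 * up only when the per-SoC data sets ->tsu, is the TSU block. Two ports
 * sharing one TSU split its tables via mdp->port = devno % 2.
 */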
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int sh_eth_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		ret = sh_eth_close(ndev);
	}

	return ret;
}

static int sh_eth_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		ret = sh_eth_open(ndev);
		if (ret < 0)
			return ret;
		netif_device_attach(ndev);
	}

	return ret;
}
#endif

static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif
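/* With CONFIG_PM disabled, SH_ETH_PM_OPS resolves to NULL, so the driver
 * structure below builds unchanged in either configuration.
 */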
static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7793-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7794-ether", (kernel_ulong_t)&r8a779x_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		   .name = CARDNAME,
		   .pm = SH_ETH_PM_OPS,
		   .of_match_table = of_match_ptr(sh_eth_match_table),
	},
};

module_platform_driver(sh_eth_driver);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");