/*
 * SuperH Ethernet device driver
 *
 * Copyright (C) 2006,2007 Nobuhiro Iwamatsu
 * Copyright (C) 2008 Renesas Solutions Corp.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
23 #include <linux/version.h>
24 #include <linux/init.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/etherdevice.h>
27 #include <linux/delay.h>
28 #include <linux/platform_device.h>
29 #include <linux/mdio-bitbang.h>
30 #include <linux/netdevice.h>
31 #include <linux/phy.h>
32 #include <linux/cache.h>
38 * Program the hardware MAC address from dev->dev_addr.
40 static void update_mac_address(struct net_device
*ndev
)
42 u32 ioaddr
= ndev
->base_addr
;
44 ctrl_outl((ndev
->dev_addr
[0] << 24) | (ndev
->dev_addr
[1] << 16) |
45 (ndev
->dev_addr
[2] << 8) | (ndev
->dev_addr
[3]),
47 ctrl_outl((ndev
->dev_addr
[4] << 8) | (ndev
->dev_addr
[5]),
52 * Get MAC address from SuperH MAC address register
54 * SuperH's Ethernet device doesn't have 'ROM' to MAC address.
55 * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g).
56 * When you want use this device, you must set MAC address in bootloader.
59 static void read_mac_address(struct net_device
*ndev
)
61 u32 ioaddr
= ndev
->base_addr
;
63 ndev
->dev_addr
[0] = (ctrl_inl(ioaddr
+ MAHR
) >> 24);
64 ndev
->dev_addr
[1] = (ctrl_inl(ioaddr
+ MAHR
) >> 16) & 0xFF;
65 ndev
->dev_addr
[2] = (ctrl_inl(ioaddr
+ MAHR
) >> 8) & 0xFF;
66 ndev
->dev_addr
[3] = (ctrl_inl(ioaddr
+ MAHR
) & 0xFF);
67 ndev
->dev_addr
[4] = (ctrl_inl(ioaddr
+ MALR
) >> 8) & 0xFF;
68 ndev
->dev_addr
[5] = (ctrl_inl(ioaddr
+ MALR
) & 0xFF);
72 struct mdiobb_ctrl ctrl
;
81 static void bb_set(u32 addr
, u32 msk
)
83 ctrl_outl(ctrl_inl(addr
) | msk
, addr
);
87 static void bb_clr(u32 addr
, u32 msk
)
89 ctrl_outl((ctrl_inl(addr
) & ~msk
), addr
);
93 static int bb_read(u32 addr
, u32 msk
)
95 return (ctrl_inl(addr
) & msk
) != 0;
98 /* Data I/O pin control */
99 static void sh_mmd_ctrl(struct mdiobb_ctrl
*ctrl
, int bit
)
101 struct bb_info
*bitbang
= container_of(ctrl
, struct bb_info
, ctrl
);
103 bb_set(bitbang
->addr
, bitbang
->mmd_msk
);
105 bb_clr(bitbang
->addr
, bitbang
->mmd_msk
);
109 static void sh_set_mdio(struct mdiobb_ctrl
*ctrl
, int bit
)
111 struct bb_info
*bitbang
= container_of(ctrl
, struct bb_info
, ctrl
);
114 bb_set(bitbang
->addr
, bitbang
->mdo_msk
);
116 bb_clr(bitbang
->addr
, bitbang
->mdo_msk
);
120 static int sh_get_mdio(struct mdiobb_ctrl
*ctrl
)
122 struct bb_info
*bitbang
= container_of(ctrl
, struct bb_info
, ctrl
);
123 return bb_read(bitbang
->addr
, bitbang
->mdi_msk
);
126 /* MDC pin control */
127 static void sh_mdc_ctrl(struct mdiobb_ctrl
*ctrl
, int bit
)
129 struct bb_info
*bitbang
= container_of(ctrl
, struct bb_info
, ctrl
);
132 bb_set(bitbang
->addr
, bitbang
->mdc_msk
);
134 bb_clr(bitbang
->addr
, bitbang
->mdc_msk
);
137 /* mdio bus control struct */
138 static struct mdiobb_ops bb_ops
= {
139 .owner
= THIS_MODULE
,
140 .set_mdc
= sh_mdc_ctrl
,
141 .set_mdio_dir
= sh_mmd_ctrl
,
142 .set_mdio_data
= sh_set_mdio
,
143 .get_mdio_data
= sh_get_mdio
,
146 static void sh_eth_reset(struct net_device
*ndev
)
148 u32 ioaddr
= ndev
->base_addr
;
150 ctrl_outl(ctrl_inl(ioaddr
+ EDMR
) | EDMR_SRST
, ioaddr
+ EDMR
);
152 ctrl_outl(ctrl_inl(ioaddr
+ EDMR
) & ~EDMR_SRST
, ioaddr
+ EDMR
);
155 /* free skb and descriptor buffer */
156 static void sh_eth_ring_free(struct net_device
*ndev
)
158 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
161 /* Free Rx skb ringbuffer */
162 if (mdp
->rx_skbuff
) {
163 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
164 if (mdp
->rx_skbuff
[i
])
165 dev_kfree_skb(mdp
->rx_skbuff
[i
]);
168 kfree(mdp
->rx_skbuff
);
170 /* Free Tx skb ringbuffer */
171 if (mdp
->tx_skbuff
) {
172 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
173 if (mdp
->tx_skbuff
[i
])
174 dev_kfree_skb(mdp
->tx_skbuff
[i
]);
177 kfree(mdp
->tx_skbuff
);
180 /* format skb and descriptor buffer */
181 static void sh_eth_ring_format(struct net_device
*ndev
)
183 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
186 struct sh_eth_rxdesc
*rxdesc
= NULL
;
187 struct sh_eth_txdesc
*txdesc
= NULL
;
188 int rx_ringsize
= sizeof(*rxdesc
) * RX_RING_SIZE
;
189 int tx_ringsize
= sizeof(*txdesc
) * TX_RING_SIZE
;
191 mdp
->cur_rx
= mdp
->cur_tx
= 0;
192 mdp
->dirty_rx
= mdp
->dirty_tx
= 0;
194 memset(mdp
->rx_ring
, 0, rx_ringsize
);
196 /* build Rx ring buffer */
197 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
199 mdp
->rx_skbuff
[i
] = NULL
;
200 skb
= dev_alloc_skb(mdp
->rx_buf_sz
);
201 mdp
->rx_skbuff
[i
] = skb
;
204 skb
->dev
= ndev
; /* Mark as being used by this device. */
205 skb_reserve(skb
, RX_OFFSET
);
208 rxdesc
= &mdp
->rx_ring
[i
];
209 rxdesc
->addr
= (u32
)skb
->data
& ~0x3UL
;
210 rxdesc
->status
= cpu_to_le32(RD_RACT
| RD_RFP
);
212 /* The size of the buffer is 16 byte boundary. */
213 rxdesc
->buffer_length
= (mdp
->rx_buf_sz
+ 16) & ~0x0F;
216 mdp
->dirty_rx
= (u32
) (i
- RX_RING_SIZE
);
218 /* Mark the last entry as wrapping the ring. */
219 rxdesc
->status
|= cpu_to_le32(RC_RDEL
);
221 memset(mdp
->tx_ring
, 0, tx_ringsize
);
223 /* build Tx ring buffer */
224 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
225 mdp
->tx_skbuff
[i
] = NULL
;
226 txdesc
= &mdp
->tx_ring
[i
];
227 txdesc
->status
= cpu_to_le32(TD_TFP
);
228 txdesc
->buffer_length
= 0;
231 txdesc
->status
|= cpu_to_le32(TD_TDLE
);
234 /* Get skb and descriptor buffer */
235 static int sh_eth_ring_init(struct net_device
*ndev
)
237 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
238 int rx_ringsize
, tx_ringsize
, ret
= 0;
241 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
242 * card needs room to do 8 byte alignment, +2 so we can reserve
243 * the first 2 bytes, and +16 gets room for the status word from the
246 mdp
->rx_buf_sz
= (ndev
->mtu
<= 1492 ? PKT_BUF_SZ
:
247 (((ndev
->mtu
+ 26 + 7) & ~7) + 2 + 16));
249 /* Allocate RX and TX skb rings */
250 mdp
->rx_skbuff
= kmalloc(sizeof(*mdp
->rx_skbuff
) * RX_RING_SIZE
,
252 if (!mdp
->rx_skbuff
) {
253 printk(KERN_ERR
"%s: Cannot allocate Rx skb\n", ndev
->name
);
258 mdp
->tx_skbuff
= kmalloc(sizeof(*mdp
->tx_skbuff
) * TX_RING_SIZE
,
260 if (!mdp
->tx_skbuff
) {
261 printk(KERN_ERR
"%s: Cannot allocate Tx skb\n", ndev
->name
);
266 /* Allocate all Rx descriptors. */
267 rx_ringsize
= sizeof(struct sh_eth_rxdesc
) * RX_RING_SIZE
;
268 mdp
->rx_ring
= dma_alloc_coherent(NULL
, rx_ringsize
, &mdp
->rx_desc_dma
,
272 printk(KERN_ERR
"%s: Cannot allocate Rx Ring (size %d bytes)\n",
273 ndev
->name
, rx_ringsize
);
280 /* Allocate all Tx descriptors. */
281 tx_ringsize
= sizeof(struct sh_eth_txdesc
) * TX_RING_SIZE
;
282 mdp
->tx_ring
= dma_alloc_coherent(NULL
, tx_ringsize
, &mdp
->tx_desc_dma
,
285 printk(KERN_ERR
"%s: Cannot allocate Tx Ring (size %d bytes)\n",
286 ndev
->name
, tx_ringsize
);
293 /* free DMA buffer */
294 dma_free_coherent(NULL
, rx_ringsize
, mdp
->rx_ring
, mdp
->rx_desc_dma
);
297 /* Free Rx and Tx skb ring buffer */
298 sh_eth_ring_free(ndev
);
303 static int sh_eth_dev_init(struct net_device
*ndev
)
306 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
307 u32 ioaddr
= ndev
->base_addr
;
308 u_int32_t rx_int_var
, tx_int_var
;
314 ctrl_outl(RPADIR_PADS1
, ioaddr
+ RPADIR
); /* SH7712-DMA-RX-PAD2 */
316 /* all sh_eth int mask */
317 ctrl_outl(0, ioaddr
+ EESIPR
);
320 ctrl_outl(0, ioaddr
+ EDMR
); /* Endian change */
322 ctrl_outl((FIFO_SIZE_T
| FIFO_SIZE_R
), ioaddr
+ FDR
);
323 ctrl_outl(0, ioaddr
+ TFTR
);
325 ctrl_outl(0, ioaddr
+ RMCR
);
327 rx_int_var
= mdp
->rx_int_var
= DESC_I_RINT8
| DESC_I_RINT5
;
328 tx_int_var
= mdp
->tx_int_var
= DESC_I_TINT2
;
329 ctrl_outl(rx_int_var
| tx_int_var
, ioaddr
+ TRSCER
);
331 ctrl_outl((FIFO_F_D_RFF
| FIFO_F_D_RFD
), ioaddr
+ FCFTR
);
332 ctrl_outl(0, ioaddr
+ TRIMD
);
334 /* Descriptor format */
335 sh_eth_ring_format(ndev
);
337 ctrl_outl((u32
)mdp
->rx_ring
, ioaddr
+ RDLAR
);
338 ctrl_outl((u32
)mdp
->tx_ring
, ioaddr
+ TDLAR
);
340 ctrl_outl(ctrl_inl(ioaddr
+ EESR
), ioaddr
+ EESR
);
341 ctrl_outl((DMAC_M_RFRMER
| DMAC_M_ECI
| 0x003fffff), ioaddr
+ EESIPR
);
343 /* PAUSE Prohibition */
344 val
= (ctrl_inl(ioaddr
+ ECMR
) & ECMR_DM
) |
345 ECMR_ZPF
| (mdp
->duplex
? ECMR_DM
: 0) | ECMR_TE
| ECMR_RE
;
347 ctrl_outl(val
, ioaddr
+ ECMR
);
348 ctrl_outl(ECSR_BRCRX
| ECSR_PSRTO
| ECSR_LCHNG
| ECSR_ICD
|
349 ECSIPR_MPDIP
, ioaddr
+ ECSR
);
350 ctrl_outl(ECSIPR_BRCRXIP
| ECSIPR_PSRTOIP
| ECSIPR_LCHNGIP
|
351 ECSIPR_ICDIP
| ECSIPR_MPDIP
, ioaddr
+ ECSIPR
);
353 /* Set MAC address */
354 update_mac_address(ndev
);
357 #if defined(CONFIG_CPU_SUBTYPE_SH7710)
358 ctrl_outl(APR_AP
, ioaddr
+ APR
);
359 ctrl_outl(MPR_MP
, ioaddr
+ MPR
);
360 ctrl_outl(TPAUSER_UNLIMITED
, ioaddr
+ TPAUSER
);
361 ctrl_outl(BCFR_UNLIMITED
, ioaddr
+ BCFR
);
363 /* Setting the Rx mode will start the Rx process. */
364 ctrl_outl(EDRRR_R
, ioaddr
+ EDRRR
);
366 netif_start_queue(ndev
);
371 /* free Tx skb function */
372 static int sh_eth_txfree(struct net_device
*ndev
)
374 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
375 struct sh_eth_txdesc
*txdesc
;
379 for (; mdp
->cur_tx
- mdp
->dirty_tx
> 0; mdp
->dirty_tx
++) {
380 entry
= mdp
->dirty_tx
% TX_RING_SIZE
;
381 txdesc
= &mdp
->tx_ring
[entry
];
382 if (txdesc
->status
& cpu_to_le32(TD_TACT
))
384 /* Free the original skb. */
385 if (mdp
->tx_skbuff
[entry
]) {
386 dev_kfree_skb_irq(mdp
->tx_skbuff
[entry
]);
387 mdp
->tx_skbuff
[entry
] = NULL
;
390 txdesc
->status
= cpu_to_le32(TD_TFP
);
391 if (entry
>= TX_RING_SIZE
- 1)
392 txdesc
->status
|= cpu_to_le32(TD_TDLE
);
394 mdp
->stats
.tx_packets
++;
395 mdp
->stats
.tx_bytes
+= txdesc
->buffer_length
;
400 /* Packet receive function */
401 static int sh_eth_rx(struct net_device
*ndev
)
403 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
404 struct sh_eth_rxdesc
*rxdesc
;
406 int entry
= mdp
->cur_rx
% RX_RING_SIZE
;
407 int boguscnt
= (mdp
->dirty_rx
+ RX_RING_SIZE
) - mdp
->cur_rx
;
412 rxdesc
= &mdp
->rx_ring
[entry
];
413 while (!(rxdesc
->status
& cpu_to_le32(RD_RACT
))) {
414 desc_status
= le32_to_cpu(rxdesc
->status
);
415 pkt_len
= rxdesc
->frame_length
;
420 if (!(desc_status
& RDFEND
))
421 mdp
->stats
.rx_length_errors
++;
423 if (desc_status
& (RD_RFS1
| RD_RFS2
| RD_RFS3
| RD_RFS4
|
424 RD_RFS5
| RD_RFS6
| RD_RFS10
)) {
425 mdp
->stats
.rx_errors
++;
426 if (desc_status
& RD_RFS1
)
427 mdp
->stats
.rx_crc_errors
++;
428 if (desc_status
& RD_RFS2
)
429 mdp
->stats
.rx_frame_errors
++;
430 if (desc_status
& RD_RFS3
)
431 mdp
->stats
.rx_length_errors
++;
432 if (desc_status
& RD_RFS4
)
433 mdp
->stats
.rx_length_errors
++;
434 if (desc_status
& RD_RFS6
)
435 mdp
->stats
.rx_missed_errors
++;
436 if (desc_status
& RD_RFS10
)
437 mdp
->stats
.rx_over_errors
++;
439 swaps((char *)(rxdesc
->addr
& ~0x3), pkt_len
+ 2);
440 skb
= mdp
->rx_skbuff
[entry
];
441 mdp
->rx_skbuff
[entry
] = NULL
;
442 skb_put(skb
, pkt_len
);
443 skb
->protocol
= eth_type_trans(skb
, ndev
);
445 ndev
->last_rx
= jiffies
;
446 mdp
->stats
.rx_packets
++;
447 mdp
->stats
.rx_bytes
+= pkt_len
;
449 rxdesc
->status
|= cpu_to_le32(RD_RACT
);
450 entry
= (++mdp
->cur_rx
) % RX_RING_SIZE
;
453 /* Refill the Rx ring buffers. */
454 for (; mdp
->cur_rx
- mdp
->dirty_rx
> 0; mdp
->dirty_rx
++) {
455 entry
= mdp
->dirty_rx
% RX_RING_SIZE
;
456 rxdesc
= &mdp
->rx_ring
[entry
];
457 if (mdp
->rx_skbuff
[entry
] == NULL
) {
458 skb
= dev_alloc_skb(mdp
->rx_buf_sz
);
459 mdp
->rx_skbuff
[entry
] = skb
;
461 break; /* Better luck next round. */
463 skb_reserve(skb
, RX_OFFSET
);
464 rxdesc
->addr
= (u32
)skb
->data
& ~0x3UL
;
466 /* The size of the buffer is 16 byte boundary. */
467 rxdesc
->buffer_length
= (mdp
->rx_buf_sz
+ 16) & ~0x0F;
468 if (entry
>= RX_RING_SIZE
- 1)
470 cpu_to_le32(RD_RACT
| RD_RFP
| RC_RDEL
);
473 cpu_to_le32(RD_RACT
| RD_RFP
);
476 /* Restart Rx engine if stopped. */
477 /* If we don't need to check status, don't. -KDU */
478 ctrl_outl(EDRRR_R
, ndev
->base_addr
+ EDRRR
);
483 /* error control function */
484 static void sh_eth_error(struct net_device
*ndev
, int intr_status
)
486 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
487 u32 ioaddr
= ndev
->base_addr
;
490 if (intr_status
& EESR_ECI
) {
491 felic_stat
= ctrl_inl(ioaddr
+ ECSR
);
492 ctrl_outl(felic_stat
, ioaddr
+ ECSR
); /* clear int */
493 if (felic_stat
& ECSR_ICD
)
494 mdp
->stats
.tx_carrier_errors
++;
495 if (felic_stat
& ECSR_LCHNG
) {
497 u32 link_stat
= (ctrl_inl(ioaddr
+ PSR
));
498 if (!(link_stat
& PHY_ST_LINK
)) {
499 /* Link Down : disable tx and rx */
500 ctrl_outl(ctrl_inl(ioaddr
+ ECMR
) &
501 ~(ECMR_RE
| ECMR_TE
), ioaddr
+ ECMR
);
504 ctrl_outl(ctrl_inl(ioaddr
+ EESIPR
) &
505 ~DMAC_M_ECI
, ioaddr
+ EESIPR
);
507 ctrl_outl(ctrl_inl(ioaddr
+ ECSR
),
509 ctrl_outl(ctrl_inl(ioaddr
+ EESIPR
) |
510 DMAC_M_ECI
, ioaddr
+ EESIPR
);
511 /* enable tx and rx */
512 ctrl_outl(ctrl_inl(ioaddr
+ ECMR
) |
513 (ECMR_RE
| ECMR_TE
), ioaddr
+ ECMR
);
518 if (intr_status
& EESR_TWB
) {
519 /* Write buck end. unused write back interrupt */
520 if (intr_status
& EESR_TABT
) /* Transmit Abort int */
521 mdp
->stats
.tx_aborted_errors
++;
524 if (intr_status
& EESR_RABT
) {
525 /* Receive Abort int */
526 if (intr_status
& EESR_RFRMER
) {
527 /* Receive Frame Overflow int */
528 mdp
->stats
.rx_frame_errors
++;
529 printk(KERN_ERR
"Receive Frame Overflow\n");
533 if (intr_status
& EESR_ADE
) {
534 if (intr_status
& EESR_TDE
) {
535 if (intr_status
& EESR_TFE
)
536 mdp
->stats
.tx_fifo_errors
++;
540 if (intr_status
& EESR_RDE
) {
541 /* Receive Descriptor Empty int */
542 mdp
->stats
.rx_over_errors
++;
544 if (ctrl_inl(ioaddr
+ EDRRR
) ^ EDRRR_R
)
545 ctrl_outl(EDRRR_R
, ioaddr
+ EDRRR
);
546 printk(KERN_ERR
"Receive Descriptor Empty\n");
548 if (intr_status
& EESR_RFE
) {
549 /* Receive FIFO Overflow int */
550 mdp
->stats
.rx_fifo_errors
++;
551 printk(KERN_ERR
"Receive FIFO Overflow\n");
554 (EESR_TWB
| EESR_TABT
| EESR_ADE
| EESR_TDE
| EESR_TFE
)) {
556 u32 edtrr
= ctrl_inl(ndev
->base_addr
+ EDTRR
);
558 printk(KERN_ERR
"%s:TX error. status=%8.8x cur_tx=%8.8x ",
559 ndev
->name
, intr_status
, mdp
->cur_tx
);
560 printk(KERN_ERR
"dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
561 mdp
->dirty_tx
, (u32
) ndev
->state
, edtrr
);
562 /* dirty buffer free */
566 if (edtrr
^ EDTRR_TRNS
) {
568 ctrl_outl(EDTRR_TRNS
, ndev
->base_addr
+ EDTRR
);
571 netif_wake_queue(ndev
);
575 static irqreturn_t
sh_eth_interrupt(int irq
, void *netdev
)
577 struct net_device
*ndev
= netdev
;
578 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
579 u32 ioaddr
, boguscnt
= RX_RING_SIZE
;
582 ioaddr
= ndev
->base_addr
;
583 spin_lock(&mdp
->lock
);
585 intr_status
= ctrl_inl(ioaddr
+ EESR
);
586 /* Clear interrupt */
587 ctrl_outl(intr_status
, ioaddr
+ EESR
);
589 if (intr_status
& (EESR_FRC
| EESR_RINT8
|
590 EESR_RINT5
| EESR_RINT4
| EESR_RINT3
| EESR_RINT2
|
593 if (intr_status
& (EESR_FTC
|
594 EESR_TINT4
| EESR_TINT3
| EESR_TINT2
| EESR_TINT1
)) {
597 netif_wake_queue(ndev
);
600 if (intr_status
& EESR_ERR_CHECK
)
601 sh_eth_error(ndev
, intr_status
);
603 if (--boguscnt
< 0) {
605 "%s: Too much work at interrupt, status=0x%4.4x.\n",
606 ndev
->name
, intr_status
);
609 spin_unlock(&mdp
->lock
);
614 static void sh_eth_timer(unsigned long data
)
616 struct net_device
*ndev
= (struct net_device
*)data
;
617 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
619 mod_timer(&mdp
->timer
, jiffies
+ (10 * HZ
));
622 /* PHY state control function */
623 static void sh_eth_adjust_link(struct net_device
*ndev
)
625 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
626 struct phy_device
*phydev
= mdp
->phydev
;
627 u32 ioaddr
= ndev
->base_addr
;
630 if (phydev
->link
!= PHY_DOWN
) {
631 if (phydev
->duplex
!= mdp
->duplex
) {
633 mdp
->duplex
= phydev
->duplex
;
636 if (phydev
->speed
!= mdp
->speed
) {
638 mdp
->speed
= phydev
->speed
;
640 if (mdp
->link
== PHY_DOWN
) {
641 ctrl_outl((ctrl_inl(ioaddr
+ ECMR
) & ~ECMR_TXF
)
642 | ECMR_DM
, ioaddr
+ ECMR
);
644 mdp
->link
= phydev
->link
;
646 } else if (mdp
->link
) {
648 mdp
->link
= PHY_DOWN
;
654 phy_print_status(phydev
);
657 /* PHY init function */
658 static int sh_eth_phy_init(struct net_device
*ndev
)
660 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
661 char phy_id
[BUS_ID_SIZE
];
662 struct phy_device
*phydev
= NULL
;
664 snprintf(phy_id
, BUS_ID_SIZE
, PHY_ID_FMT
,
665 mdp
->mii_bus
->id
, mdp
->phy_id
);
667 mdp
->link
= PHY_DOWN
;
671 /* Try connect to PHY */
672 phydev
= phy_connect(ndev
, phy_id
, &sh_eth_adjust_link
,
673 0, PHY_INTERFACE_MODE_MII
);
674 if (IS_ERR(phydev
)) {
675 dev_err(&ndev
->dev
, "phy_connect failed\n");
676 return PTR_ERR(phydev
);
678 dev_info(&ndev
->dev
, "attached phy %i to driver %s\n",
679 phydev
->addr
, phydev
->drv
->name
);
681 mdp
->phydev
= phydev
;
686 /* PHY control start function */
687 static int sh_eth_phy_start(struct net_device
*ndev
)
689 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
692 ret
= sh_eth_phy_init(ndev
);
696 /* reset phy - this also wakes it from PDOWN */
697 phy_write(mdp
->phydev
, MII_BMCR
, BMCR_RESET
);
698 phy_start(mdp
->phydev
);
703 /* network device open function */
704 static int sh_eth_open(struct net_device
*ndev
)
707 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
709 ret
= request_irq(ndev
->irq
, &sh_eth_interrupt
, 0, ndev
->name
, ndev
);
711 printk(KERN_ERR
"Can not assign IRQ number to %s\n", CARDNAME
);
716 ret
= sh_eth_ring_init(ndev
);
721 ret
= sh_eth_dev_init(ndev
);
725 /* PHY control start*/
726 ret
= sh_eth_phy_start(ndev
);
730 /* Set the timer to check for link beat. */
731 init_timer(&mdp
->timer
);
732 mdp
->timer
.expires
= (jiffies
+ (24 * HZ
)) / 10;/* 2.4 sec. */
733 setup_timer(&mdp
->timer
, sh_eth_timer
, ndev
);
738 free_irq(ndev
->irq
, ndev
);
742 /* Timeout function */
743 static void sh_eth_tx_timeout(struct net_device
*ndev
)
745 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
746 u32 ioaddr
= ndev
->base_addr
;
747 struct sh_eth_rxdesc
*rxdesc
;
750 netif_stop_queue(ndev
);
752 /* worning message out. */
753 printk(KERN_WARNING
"%s: transmit timed out, status %8.8x,"
754 " resetting...\n", ndev
->name
, (int)ctrl_inl(ioaddr
+ EESR
));
756 /* tx_errors count up */
757 mdp
->stats
.tx_errors
++;
760 del_timer_sync(&mdp
->timer
);
762 /* Free all the skbuffs in the Rx queue. */
763 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
764 rxdesc
= &mdp
->rx_ring
[i
];
766 rxdesc
->addr
= 0xBADF00D0;
767 if (mdp
->rx_skbuff
[i
])
768 dev_kfree_skb(mdp
->rx_skbuff
[i
]);
769 mdp
->rx_skbuff
[i
] = NULL
;
771 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
772 if (mdp
->tx_skbuff
[i
])
773 dev_kfree_skb(mdp
->tx_skbuff
[i
]);
774 mdp
->tx_skbuff
[i
] = NULL
;
778 sh_eth_dev_init(ndev
);
781 mdp
->timer
.expires
= (jiffies
+ (24 * HZ
)) / 10;/* 2.4 sec. */
782 add_timer(&mdp
->timer
);
785 /* Packet transmit function */
786 static int sh_eth_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
788 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
789 struct sh_eth_txdesc
*txdesc
;
793 spin_lock_irqsave(&mdp
->lock
, flags
);
794 if ((mdp
->cur_tx
- mdp
->dirty_tx
) >= (TX_RING_SIZE
- 4)) {
795 if (!sh_eth_txfree(ndev
)) {
796 netif_stop_queue(ndev
);
797 spin_unlock_irqrestore(&mdp
->lock
, flags
);
801 spin_unlock_irqrestore(&mdp
->lock
, flags
);
803 entry
= mdp
->cur_tx
% TX_RING_SIZE
;
804 mdp
->tx_skbuff
[entry
] = skb
;
805 txdesc
= &mdp
->tx_ring
[entry
];
806 txdesc
->addr
= (u32
)(skb
->data
);
808 swaps((char *)(txdesc
->addr
& ~0x3), skb
->len
+ 2);
810 __flush_purge_region(skb
->data
, skb
->len
);
811 if (skb
->len
< ETHERSMALL
)
812 txdesc
->buffer_length
= ETHERSMALL
;
814 txdesc
->buffer_length
= skb
->len
;
816 if (entry
>= TX_RING_SIZE
- 1)
817 txdesc
->status
|= cpu_to_le32(TD_TACT
| TD_TDLE
);
819 txdesc
->status
|= cpu_to_le32(TD_TACT
);
823 ctrl_outl(EDTRR_TRNS
, ndev
->base_addr
+ EDTRR
);
824 ndev
->trans_start
= jiffies
;
829 /* device close function */
830 static int sh_eth_close(struct net_device
*ndev
)
832 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
833 u32 ioaddr
= ndev
->base_addr
;
836 netif_stop_queue(ndev
);
838 /* Disable interrupts by clearing the interrupt mask. */
839 ctrl_outl(0x0000, ioaddr
+ EESIPR
);
841 /* Stop the chip's Tx and Rx processes. */
842 ctrl_outl(0, ioaddr
+ EDTRR
);
843 ctrl_outl(0, ioaddr
+ EDRRR
);
847 phy_stop(mdp
->phydev
);
848 phy_disconnect(mdp
->phydev
);
851 free_irq(ndev
->irq
, ndev
);
853 del_timer_sync(&mdp
->timer
);
855 /* Free all the skbuffs in the Rx queue. */
856 sh_eth_ring_free(ndev
);
858 /* free DMA buffer */
859 ringsize
= sizeof(struct sh_eth_rxdesc
) * RX_RING_SIZE
;
860 dma_free_coherent(NULL
, ringsize
, mdp
->rx_ring
, mdp
->rx_desc_dma
);
862 /* free DMA buffer */
863 ringsize
= sizeof(struct sh_eth_txdesc
) * TX_RING_SIZE
;
864 dma_free_coherent(NULL
, ringsize
, mdp
->tx_ring
, mdp
->tx_desc_dma
);
869 static struct net_device_stats
*sh_eth_get_stats(struct net_device
*ndev
)
871 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
872 u32 ioaddr
= ndev
->base_addr
;
874 mdp
->stats
.tx_dropped
+= ctrl_inl(ioaddr
+ TROCR
);
875 ctrl_outl(0, ioaddr
+ TROCR
); /* (write clear) */
876 mdp
->stats
.collisions
+= ctrl_inl(ioaddr
+ CDCR
);
877 ctrl_outl(0, ioaddr
+ CDCR
); /* (write clear) */
878 mdp
->stats
.tx_carrier_errors
+= ctrl_inl(ioaddr
+ LCCR
);
879 ctrl_outl(0, ioaddr
+ LCCR
); /* (write clear) */
880 mdp
->stats
.tx_carrier_errors
+= ctrl_inl(ioaddr
+ CNDCR
);
881 ctrl_outl(0, ioaddr
+ CNDCR
); /* (write clear) */
886 /* ioctl to device funciotn*/
887 static int sh_eth_do_ioctl(struct net_device
*ndev
, struct ifreq
*rq
,
890 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
891 struct phy_device
*phydev
= mdp
->phydev
;
893 if (!netif_running(ndev
))
899 return phy_mii_ioctl(phydev
, if_mii(rq
), cmd
);
903 /* Multicast reception directions set */
904 static void sh_eth_set_multicast_list(struct net_device
*ndev
)
906 u32 ioaddr
= ndev
->base_addr
;
908 if (ndev
->flags
& IFF_PROMISC
) {
909 /* Set promiscuous. */
910 ctrl_outl((ctrl_inl(ioaddr
+ ECMR
) & ~ECMR_MCT
) | ECMR_PRM
,
913 /* Normal, unicast/broadcast-only mode. */
914 ctrl_outl((ctrl_inl(ioaddr
+ ECMR
) & ~ECMR_PRM
) | ECMR_MCT
,
919 /* SuperH's TSU register init function */
920 static void sh_eth_tsu_init(u32 ioaddr
)
922 ctrl_outl(0, ioaddr
+ TSU_FWEN0
); /* Disable forward(0->1) */
923 ctrl_outl(0, ioaddr
+ TSU_FWEN1
); /* Disable forward(1->0) */
924 ctrl_outl(0, ioaddr
+ TSU_FCM
); /* forward fifo 3k-3k */
925 ctrl_outl(0xc, ioaddr
+ TSU_BSYSL0
);
926 ctrl_outl(0xc, ioaddr
+ TSU_BSYSL1
);
927 ctrl_outl(0, ioaddr
+ TSU_PRISL0
);
928 ctrl_outl(0, ioaddr
+ TSU_PRISL1
);
929 ctrl_outl(0, ioaddr
+ TSU_FWSL0
);
930 ctrl_outl(0, ioaddr
+ TSU_FWSL1
);
931 ctrl_outl(TSU_FWSLC_POSTENU
| TSU_FWSLC_POSTENL
, ioaddr
+ TSU_FWSLC
);
932 ctrl_outl(0, ioaddr
+ TSU_QTAGM0
); /* Disable QTAG(0->1) */
933 ctrl_outl(0, ioaddr
+ TSU_QTAGM1
); /* Disable QTAG(1->0) */
934 ctrl_outl(0, ioaddr
+ TSU_FWSR
); /* all interrupt status clear */
935 ctrl_outl(0, ioaddr
+ TSU_FWINMK
); /* Disable all interrupt */
936 ctrl_outl(0, ioaddr
+ TSU_TEN
); /* Disable all CAM entry */
937 ctrl_outl(0, ioaddr
+ TSU_POST1
); /* Disable CAM entry [ 0- 7] */
938 ctrl_outl(0, ioaddr
+ TSU_POST2
); /* Disable CAM entry [ 8-15] */
939 ctrl_outl(0, ioaddr
+ TSU_POST3
); /* Disable CAM entry [16-23] */
940 ctrl_outl(0, ioaddr
+ TSU_POST4
); /* Disable CAM entry [24-31] */
943 /* MDIO bus release function */
944 static int sh_mdio_release(struct net_device
*ndev
)
946 struct mii_bus
*bus
= dev_get_drvdata(&ndev
->dev
);
948 /* unregister mdio bus */
949 mdiobus_unregister(bus
);
951 /* remove mdio bus info from net_device */
952 dev_set_drvdata(&ndev
->dev
, NULL
);
954 /* free bitbang info */
955 free_mdio_bitbang(bus
);
960 /* MDIO bus init function */
961 static int sh_mdio_init(struct net_device
*ndev
, int id
)
964 struct bb_info
*bitbang
;
965 struct sh_eth_private
*mdp
= netdev_priv(ndev
);
967 /* create bit control struct for PHY */
968 bitbang
= kzalloc(sizeof(struct bb_info
), GFP_KERNEL
);
975 bitbang
->addr
= ndev
->base_addr
+ PIR
;
976 bitbang
->mdi_msk
= 0x08;
977 bitbang
->mdo_msk
= 0x04;
978 bitbang
->mmd_msk
= 0x02;/* MMD */
979 bitbang
->mdc_msk
= 0x01;
980 bitbang
->ctrl
.ops
= &bb_ops
;
982 /* MII contorller setting */
983 mdp
->mii_bus
= alloc_mdio_bitbang(&bitbang
->ctrl
);
986 goto out_free_bitbang
;
989 /* Hook up MII support for ethtool */
990 mdp
->mii_bus
->name
= "sh_mii";
991 mdp
->mii_bus
->dev
= &ndev
->dev
;
992 mdp
->mii_bus
->id
[0] = id
;
995 mdp
->mii_bus
->irq
= kmalloc(sizeof(int)*PHY_MAX_ADDR
, GFP_KERNEL
);
996 if (!mdp
->mii_bus
->irq
) {
1001 for (i
= 0; i
< PHY_MAX_ADDR
; i
++)
1002 mdp
->mii_bus
->irq
[i
] = PHY_POLL
;
1004 /* regist mdio bus */
1005 ret
= mdiobus_register(mdp
->mii_bus
);
1009 dev_set_drvdata(&ndev
->dev
, mdp
->mii_bus
);
1014 kfree(mdp
->mii_bus
->irq
);
1017 kfree(mdp
->mii_bus
);
1026 static int sh_eth_drv_probe(struct platform_device
*pdev
)
1028 int ret
, i
, devno
= 0;
1029 struct resource
*res
;
1030 struct net_device
*ndev
= NULL
;
1031 struct sh_eth_private
*mdp
;
1034 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1035 if (unlikely(res
== NULL
)) {
1036 dev_err(&pdev
->dev
, "invalid resource\n");
1041 ndev
= alloc_etherdev(sizeof(struct sh_eth_private
));
1043 printk(KERN_ERR
"%s: could not allocate device.\n", CARDNAME
);
1048 /* The sh Ether-specific entries in the device structure. */
1049 ndev
->base_addr
= res
->start
;
1055 ndev
->irq
= platform_get_irq(pdev
, 0);
1056 if (ndev
->irq
< 0) {
1061 SET_NETDEV_DEV(ndev
, &pdev
->dev
);
1063 /* Fill in the fields of the device structure with ethernet values. */
1066 mdp
= netdev_priv(ndev
);
1067 spin_lock_init(&mdp
->lock
);
1070 mdp
->phy_id
= (int)pdev
->dev
.platform_data
;
1073 ndev
->open
= sh_eth_open
;
1074 ndev
->hard_start_xmit
= sh_eth_start_xmit
;
1075 ndev
->stop
= sh_eth_close
;
1076 ndev
->get_stats
= sh_eth_get_stats
;
1077 ndev
->set_multicast_list
= sh_eth_set_multicast_list
;
1078 ndev
->do_ioctl
= sh_eth_do_ioctl
;
1079 ndev
->tx_timeout
= sh_eth_tx_timeout
;
1080 ndev
->watchdog_timeo
= TX_TIMEOUT
;
1082 mdp
->post_rx
= POST_RX
>> (devno
<< 1);
1083 mdp
->post_fw
= POST_FW
>> (devno
<< 1);
1085 /* read and set MAC address */
1086 read_mac_address(ndev
);
1088 /* First device only init */
1091 ctrl_outl(ARSTR_ARSTR
, ndev
->base_addr
+ ARSTR
);
1094 /* TSU init (Init only)*/
1095 sh_eth_tsu_init(SH_TSU_ADDR
);
1098 /* network device register */
1099 ret
= register_netdev(ndev
);
1104 ret
= sh_mdio_init(ndev
, pdev
->id
);
1106 goto out_unregister
;
1108 /* pritnt device infomation */
1109 printk(KERN_INFO
"%s: %s at 0x%x, ",
1110 ndev
->name
, CARDNAME
, (u32
) ndev
->base_addr
);
1112 for (i
= 0; i
< 5; i
++)
1113 printk(KERN_INFO
"%2.2x:", ndev
->dev_addr
[i
]);
1114 printk(KERN_INFO
"%2.2x, IRQ %d.\n", ndev
->dev_addr
[i
], ndev
->irq
);
1116 platform_set_drvdata(pdev
, ndev
);
1121 unregister_netdev(ndev
);
1132 static int sh_eth_drv_remove(struct platform_device
*pdev
)
1134 struct net_device
*ndev
= platform_get_drvdata(pdev
);
1136 sh_mdio_release(ndev
);
1137 unregister_netdev(ndev
);
1138 flush_scheduled_work();
1141 platform_set_drvdata(pdev
, NULL
);
1146 static struct platform_driver sh_eth_driver
= {
1147 .probe
= sh_eth_drv_probe
,
1148 .remove
= sh_eth_drv_remove
,
1154 static int __init
sh_eth_init(void)
1156 return platform_driver_register(&sh_eth_driver
);
1159 static void __exit
sh_eth_cleanup(void)
1161 platform_driver_unregister(&sh_eth_driver
);
1164 module_init(sh_eth_init
);
1165 module_exit(sh_eth_cleanup
);
1167 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
1168 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
1169 MODULE_LICENSE("GPL v2");