/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"
/*************************************************/

static char version[] __devinitdata =
    DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

int fs_enet_debug = -1;		/* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");
static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}
/* NAPI receive function */
static int fs_enet_rx_napi(struct net_device *dev, int *budget)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	int rx_work_limit = 0;	/* pacify gcc */

	rx_work_limit = min(dev->quota, *budget);

	if (!netif_running(dev))
		return 0;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi */
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun (counted as a CRC error here as well) */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			skbn = skb;

		} else {

			/* napi, got packet but no quota */
			if (--rx_work_limit < 0)
				break;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					memcpy(skbn->data, skb->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

			if (skbn != NULL) {
				skb->dev = dev;
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
						 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
						 DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	dev->quota -= received;
	*budget -= received;

	if (rx_work_limit < 0)
		return 1;	/* not done */

	/* done */
	netif_rx_complete(dev);

	(*fep->ops->napi_enable_rx)(dev);

	return 0;
}
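/*
 * Note on the (pre-2.6.24) NAPI ->poll() contract assumed above: the
 * core passes a *budget, the driver may consume at most
 * min(dev->quota, *budget) packets, decrements both counters by the
 * number actually received, and returns 1 to stay on the poll list
 * (more work pending) or 0 after calling netif_rx_complete() and
 * re-enabling the RX interrupt, as done here via napi_enable_rx.
 */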
/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun (counted as a CRC error here as well) */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			skbn = skb;

		} else {

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					memcpy(skbn->data, skb->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

			if (skbn != NULL) {
				skb->dev = dev;
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_rx(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
						 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
						 DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	return 0;
}
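/*
 * Both receive paths above implement the classic "copybreak"
 * optimization: frames no longer than fpi->rx_copybreak are copied into
 * a freshly allocated skb sized to the packet, so the original
 * full-size ENET_RX_FRSIZE buffer stays in the ring, while larger
 * frames hand the full buffer up the stack and are replaced by a new
 * full-size allocation. This trades a small memcpy for avoiding
 * large-buffer churn on small packets.
 */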
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {

		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
			       dev->name);

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->lock);

	if (do_wake)
		netif_wake_queue(dev);
}
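/*
 * fs_enet_tx() reaps descriptors from dirty_tx forward until it meets
 * one still marked BD_ENET_TX_READY (i.e. still owned by the
 * controller). Each reaped slot returns one unit to tx_free; the queue
 * is woken (do_wake) only when the ring stops being completely full,
 * and tx_restart (do_restart) is invoked for the error classes that
 * stop the transmitter: late collision, retransmission limit, underrun.
 */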
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int napi_ok;
	int handled = 0;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		handled = 1;

		int_clr_events = int_events;
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = netif_rx_schedule_prep(dev);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/* NOTE: it is possible for FCCs in NAPI mode */
				/* to submit a spurious interrupt while in poll */
				if (napi_ok)
					__netif_rx_schedule(dev);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	return IRQ_RETVAL(handled);
}
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int i;

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = dev_alloc_skb(ENET_RX_FRSIZE);
		if (skb == NULL) {
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s Memory squeeze, unable to allocate skb\n",
			       dev->name);
			break;
		}
		fep->rx_skbuff[i] = skb;
		skb->dev = dev;
		CBDW_BUFADDR(bdp,
			     dma_map_single(fep->dev, skb->data,
					    L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					    DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * if we failed, fillup remainder
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}
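/*
 * Ring layout sketch (example: a 4-entry RX ring):
 *
 *   rx_bd_base -> [BD0 EMPTY][BD1 EMPTY][BD2 EMPTY][BD3 EMPTY|WRAP]
 *
 * Only the last descriptor carries BD_SC_WRAP, which tells both the
 * controller and the driver's walk in the rx/tx routines to return to
 * the base instead of advancing, so the fixed-size descriptor array
 * behaves as a circular buffer.
 */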
void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				 DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}
}
/**********************************************************************************/

static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	int curidx;
	u16 sc;
	unsigned long flags;

	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s tx queue full!.\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;
	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
					 skb->data, skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	dev->trans_start = jiffies;

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;

	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* note that while FEC does not have this bit
	 * it marks it as available for software use
	 * yay for hw reuse :) */
	if (skb->len < PKT_MINBUF_SIZE)
		sc |= BD_ENET_TX_PAD;
	CBDS_SC(bdp, sc);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock_irqrestore(&fep->tx_lock, flags);

	return NETDEV_TX_OK;
}
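/*
 * Flow-control note: under normal operation the "tx queue full" branch
 * above should be unreachable, because the queue is stopped
 * (netif_stop_queue) the moment tx_free drops to zero and is only
 * rewoken from fs_enet_tx() once a descriptor has been reclaimed.
 * Returning NETDEV_TX_BUSY there is a belt-and-braces fallback.
 */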
static int fs_request_irq(struct net_device *dev, int irq, const char *name,
			  irqreturn_t (*irqf)(int irq, void *dev_id,
					      struct pt_regs *regs))
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->pre_request_irq)(dev, irq);
	return request_irq(irq, irqf, IRQF_SHARED, name, dev);
}

static void fs_free_irq(struct net_device *dev, int irq)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	free_irq(irq, dev);
	(*fep->ops->post_free_irq)(dev, irq);
}
/**********************************************************************************/

/* This interrupt occurs when the PHY detects a link change. */
static irqreturn_t
fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	/*
	 * Acknowledge the interrupt if possible. If we have not
	 * found the PHY yet we can't process or acknowledge the
	 * interrupt now. Instead we ignore this interrupt for now,
	 * which we can do since it is edge triggered. It will be
	 * acknowledged later by fs_enet_open().
	 */
	fs_mii_link_status_change_check(dev, 0);

	return IRQ_HANDLED;
}
static void fs_timeout(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int wake = 0;

	fep->stats.tx_errors++;

	spin_lock_irqsave(&fep->lock, flags);

	if (dev->flags & IFF_UP) {
		(*fep->ops->stop)(dev);
		(*fep->ops->restart)(dev);
	}

	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}
static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	int r;

	/* Install our interrupt handler. */
	r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s Could not allocate FEC IRQ!", dev->name);
		return -EINVAL;
	}

	/* Install our phy interrupt handler */
	if (fpi->phy_irq != -1) {
		r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy",
				   fs_mii_link_interrupt);
		if (r != 0) {
			printk(KERN_ERR DRV_MODULE_NAME
			       ": %s Could not allocate PHY IRQ!", dev->name);
			fs_free_irq(dev, fep->interrupt);
			return -EINVAL;
		}
	}

	fs_mii_startup(dev);
	netif_carrier_off(dev);
	fs_mii_link_status_change_check(dev, 1);

	return 0;
}
static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	fs_mii_shutdown(dev);

	spin_lock_irqsave(&fep->lock, flags);
	(*fep->ops->stop)(dev);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	if (fpi->phy_irq != -1)
		fs_free_irq(dev, fpi->phy_irq);
	fs_free_irq(dev, fep->interrupt);

	return 0;
}
static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return &fep->stats;
}
/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
			   struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}
static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&fep->lock, flags);
	rc = mii_ethtool_gset(&fep->mii_if, cmd);
	spin_unlock_irqrestore(&fep->lock, flags);

	return rc;
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&fep->lock, flags);
	rc = mii_ethtool_sset(&fep->mii_if, cmd);
	spin_unlock_irqrestore(&fep->lock, flags);

	return rc;
}

static int fs_nway_reset(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return mii_nway_restart(&fep->mii_if);
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	fep->msg_enable = value;
}
static struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo	= fs_get_drvinfo,
	.get_regs_len	= fs_get_regs_len,
	.get_settings	= fs_get_settings,
	.set_settings	= fs_set_settings,
	.nway_reset	= fs_nway_reset,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= fs_get_msglevel,
	.set_msglevel	= fs_set_msglevel,
	.get_tx_csum	= ethtool_op_get_tx_csum,
	.set_tx_csum	= ethtool_op_set_tx_csum,	/* local! */
	.get_sg		= ethtool_op_get_sg,
	.set_sg		= ethtool_op_set_sg,
	.get_regs	= fs_get_regs,
};
static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
	unsigned long flags;
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irqsave(&fep->lock, flags);
	rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
	spin_unlock_irqrestore(&fep->lock, flags);

	return rc;
}
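/*
 * generic_mii_ioctl() (from the kernel's mii library) services the
 * standard SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG requests through the
 * mdio_read/mdio_write hooks in fep->mii_if, so no driver-specific
 * ioctl decoding is needed here.
 */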
extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

static struct net_device *fs_init_instance(struct device *dev,
					   const struct fs_platform_info *fpi)
{
	struct net_device *ndev = NULL;
	struct fs_enet_private *fep = NULL;
	int privsize, i, r, err = 0, registered = 0;

	/* guard */
	if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
		return ERR_PTR(-EINVAL);

	privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
				   (fpi->rx_ring + fpi->tx_ring));

	ndev = alloc_etherdev(privsize);
	if (ndev == NULL) {
		err = -ENOMEM;
		goto err;
	}
	SET_MODULE_OWNER(ndev);

	fep = netdev_priv(ndev);
	memset(fep, 0, privsize);	/* clear everything */

	fep->dev = dev;
	dev_set_drvdata(dev, ndev);
	fep->fpi = fpi;
	if (fpi->init_ioports)
		fpi->init_ioports();

#ifdef CONFIG_FS_ENET_HAS_FEC
	if (fs_get_fec_index(fpi->fs_no) >= 0)
		fep->ops = &fs_fec_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
	if (fs_get_scc_index(fpi->fs_no) >= 0)
		fep->ops = &fs_scc_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_FCC
	if (fs_get_fcc_index(fpi->fs_no) >= 0)
		fep->ops = &fs_fcc_ops;
#endif

	if (fep->ops == NULL) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s No matching ops found (%d).\n",
		       ndev->name, fpi->fs_no);
		err = -EINVAL;
		goto err;
	}

	r = (*fep->ops->setup_data)(ndev);
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s setup_data failed\n",
		       ndev->name);
		err = r;
		goto err;
	}

	/* point rx_skbuff, tx_skbuff */
	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

	/* init locks */
	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	/*
	 * Set the Ethernet address.
	 */
	for (i = 0; i < 6; i++)
		ndev->dev_addr[i] = fpi->macaddr[i];

	r = (*fep->ops->allocate_bd)(ndev);

	if (fep->ring_base == NULL) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s buffer descriptor alloc failed (%d).\n",
		       ndev->name, r);
		err = r;
		goto err;
	}

	/*
	 * Set receive and transmit descriptor base.
	 */
	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	/* initialize ring size variables */
	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	/*
	 * The FEC Ethernet specific entries in the device structure.
	 */
	ndev->open = fs_enet_open;
	ndev->hard_start_xmit = fs_enet_start_xmit;
	ndev->tx_timeout = fs_timeout;
	ndev->watchdog_timeo = 2 * HZ;
	ndev->stop = fs_enet_close;
	ndev->get_stats = fs_enet_get_stats;
	ndev->set_multicast_list = fs_set_multicast_list;
	if (fpi->use_napi) {
		ndev->poll = fs_enet_rx_napi;
		ndev->weight = fpi->napi_weight;
	}
	ndev->ethtool_ops = &fs_ethtool_ops;
	ndev->do_ioctl = fs_ioctl;

	init_timer(&fep->phy_timer_list);

	netif_carrier_off(ndev);

	err = register_netdev(ndev);
	if (err != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s register_netdev failed.\n", ndev->name);
		goto err;
	}
	registered = 1;

	err = fs_mii_connect(ndev);
	if (err != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s fs_mii_connect failed.\n", ndev->name);
		goto err;
	}

	return ndev;

err:
	if (ndev != NULL) {
		if (registered)
			unregister_netdev(ndev);

		if (fep != NULL && fep->ops != NULL) {
			if (fep->ring_base != NULL)
				(*fep->ops->free_bd)(ndev);
			(*fep->ops->cleanup_data)(ndev);
		}

		free_netdev(ndev);
	}

	dev_set_drvdata(dev, NULL);

	return ERR_PTR(err);
}
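/*
 * Note on the allocation above: the private area is laid out as
 *
 *   [ struct fs_enet_private | rx_ring skb pointers | tx_ring skb pointers ]
 *
 * which is why rx_skbuff is pointed at &fep[1] and tx_skbuff at
 * rx_skbuff + fpi->rx_ring: a single alloc_etherdev() call covers the
 * private state and both skb bookkeeping arrays.
 */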
static int fs_cleanup_instance(struct net_device *ndev)
{
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	struct device *dev;

	if (ndev == NULL)
		return -EINVAL;

	fep = netdev_priv(ndev);
	if (fep == NULL)
		return -EINVAL;

	fpi = fep->fpi;
	dev = fep->dev;

	fs_mii_disconnect(ndev);

	unregister_netdev(ndev);

	dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
			  fep->ring_base, fep->ring_mem_addr);

	(*fep->ops->cleanup_data)(ndev);

	dev_set_drvdata(dev, NULL);

	free_netdev(ndev);

	return 0;
}
/**************************************************************************************/

/* handy pointer to the immap */
void *fs_enet_immap = NULL;

static int setup_immap(void)
{
	phys_addr_t paddr = 0;
	unsigned long size = 0;

#ifdef CONFIG_CPM1
	paddr = IMAP_ADDR;
	size = 0x10000;	/* map 64K */
#endif

#ifdef CONFIG_CPM2
	paddr = CPM_MAP_ADDR;
	size = 0x40000;	/* map 256 K */
#endif

	fs_enet_immap = ioremap(paddr, size);
	if (fs_enet_immap == NULL)
		return -EBADF;	/* XXX ahem; maybe just BUG_ON? */

	return 0;
}

static void cleanup_immap(void)
{
	if (fs_enet_immap != NULL) {
		iounmap(fs_enet_immap);
		fs_enet_immap = NULL;
	}
}
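/*
 * Design note (an assumption based on the sizes used above): a single
 * shared mapping of the on-chip register space is made here,
 * presumably so the MAC-specific backends can locate their controller
 * registers through fs_enet_immap rather than each calling ioremap()
 * themselves; 64K covers the 8xx internal memory map, 256K the CPM2
 * map at CPM_MAP_ADDR.
 */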
/**************************************************************************************/

static int __devinit fs_enet_probe(struct device *dev)
{
	struct net_device *ndev;

	/* no fixup - no device */
	if (dev->platform_data == NULL) {
		printk(KERN_INFO "fs_enet: "
		       "probe called with no platform data; "
		       "remove unused devices\n");
		return -ENODEV;
	}

	ndev = fs_init_instance(dev, dev->platform_data);
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);

	return 0;
}

static int fs_enet_remove(struct device *dev)
{
	return fs_cleanup_instance(dev_get_drvdata(dev));
}
static struct device_driver fs_enet_fec_driver = {
	.name		= "fsl-cpm-fec",
	.bus		= &platform_bus_type,
	.probe		= fs_enet_probe,
	.remove		= fs_enet_remove,
/*	.suspend	= fs_enet_suspend,	TODO */
/*	.resume		= fs_enet_resume,	TODO */
};

static struct device_driver fs_enet_scc_driver = {
	.name		= "fsl-cpm-scc",
	.bus		= &platform_bus_type,
	.probe		= fs_enet_probe,
	.remove		= fs_enet_remove,
/*	.suspend	= fs_enet_suspend,	TODO */
/*	.resume		= fs_enet_resume,	TODO */
};

static struct device_driver fs_enet_fcc_driver = {
	.name		= "fsl-cpm-fcc",
	.bus		= &platform_bus_type,
	.probe		= fs_enet_probe,
	.remove		= fs_enet_remove,
/*	.suspend	= fs_enet_suspend,	TODO */
/*	.resume		= fs_enet_resume,	TODO */
};
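/*
 * Three driver instances share one probe/remove pair; the driver core
 * binds each of them by name ("fsl-cpm-fec"/"fsl-cpm-scc"/"fsl-cpm-fcc")
 * to the matching platform devices registered by the board/arch setup
 * code, and fs_init_instance() then selects the concrete ops table
 * (FEC, SCC, or FCC) from fpi->fs_no.
 */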
static int __init fs_init(void)
{
	int r;

	printk(KERN_INFO "%s", version);

	r = setup_immap();
	if (r != 0)
		return r;

	r = driver_register(&fs_enet_fec_driver);
	if (r != 0)
		goto err;

	r = driver_register(&fs_enet_fcc_driver);
	if (r != 0)
		goto err;

	r = driver_register(&fs_enet_scc_driver);
	if (r != 0)
		goto err;

	return 0;
err:
	cleanup_immap();
	return r;
}

static void __exit fs_cleanup(void)
{
	driver_unregister(&fs_enet_fec_driver);
	driver_unregister(&fs_enet_fcc_driver);
	driver_unregister(&fs_enet_scc_driver);
	cleanup_immap();
}

/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);