1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
3 Written 1999-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
/* Driver identification strings, reported in the version banner below. */
#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;		/* 0 => never copy received frames. */
static int flowctrl = 1;		/* 802.3x flow control on by default. */
44 /* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
53 3 100Mbps half duplex.
54 4 100Mbps full duplex.
57 static char *media
[MAX_UNITS
];
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
#define RX_RING_SIZE	64
/* Parenthesized so the expansions are safe inside larger expressions. */
#define TX_TOTAL_SIZE	(TX_RING_SIZE*sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE*sizeof(struct netdev_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/interrupt.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/init.h>
93 #include <linux/bitops.h>
94 #include <asm/uaccess.h>
95 #include <asm/processor.h> /* Processor type for cache alignment. */
97 #include <linux/delay.h>
98 #include <linux/spinlock.h>
99 #ifndef _COMPAT_WITH_OLD_KERNEL
100 #include <linux/crc32.h>
101 #include <linux/ethtool.h>
102 #include <linux/mii.h>
110 /* These identify the driver base version and may not be removed. */
111 static const char version
[] __devinitconst
=
112 KERN_INFO DRV_NAME
".c:v" DRV_VERSION
" " DRV_RELDATE
113 " Written by Donald Becker\n";
115 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
116 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
117 MODULE_LICENSE("GPL");
119 module_param(debug
, int, 0);
120 module_param(rx_copybreak
, int, 0);
121 module_param_array(media
, charp
, NULL
, 0);
122 module_param(flowctrl
, int, 0);
123 MODULE_PARM_DESC(debug
, "Sundance Alta debug level (0-5)");
124 MODULE_PARM_DESC(rx_copybreak
, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
125 MODULE_PARM_DESC(flowctrl
, "Sundance Alta flow control [0|1]");
130 I. Board Compatibility
132 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
134 II. Board-specific settings
136 III. Driver operation
140 This driver uses two statically allocated fixed-size descriptor lists
141 formed into rings by a branch from the final descriptor to the beginning of
142 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
143 Some chips explicitly use only 2^N sized rings, while others use a
144 'next descriptor' pointer that the driver forms into rings.
146 IIIb/c. Transmit/Receive Structure
148 This driver uses a zero-copy receive and transmit scheme.
149 The driver allocates full frame size skbuffs for the Rx ring buffers at
150 open() time and passes the skb->data field to the chip as receive data
151 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
152 a fresh skbuff is allocated and the frame is copied to the new skbuff.
153 When the incoming frame is larger, the skbuff is passed directly up the
154 protocol stack. Buffers consumed this way are replaced by newly allocated
155 skbuffs in a later phase of receives.
157 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
158 using a full-sized skbuff for small frames vs. the copying costs of larger
159 frames. New boards are typically used in generously configured machines
160 and the underfilled buffers have negligible impact compared to the benefit of
161 a single allocation size, so the default value of zero results in never
162 copying packets. When copying is done, the cost is usually mitigated by using
163 a combined copy/checksum routine. Copying also preloads the cache, which is
164 most useful with small frames.
166 A subtle aspect of the operation is that the IP header at offset 14 in an
167 ethernet frame isn't longword aligned for further processing.
168 Unaligned buffers are permitted by the Sundance hardware, so
169 frames are received into the skbuff at an offset of "+2", 16-byte aligning
172 IIId. Synchronization
174 The driver runs as two independent, single-threaded flows of control. One
175 is the send-packet routine, which enforces single-threaded use by the
176 dev->tbusy flag. The other thread is the interrupt handler, which is single
177 threaded by the hardware and interrupt handling software.
179 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
180 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
181 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
182 the 'lp->tx_full' flag.
184 The interrupt handler has exclusive control over the Rx ring and records stats
185 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
186 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
187 clears both the tx_full and tbusy flags.
193 The Sundance ST201 datasheet, preliminary version.
194 The Kendin KS8723 datasheet, preliminary version.
195 The ICplus IP100 datasheet, preliminary version.
196 http://www.scyld.com/expert/100mbps.html
197 http://www.scyld.com/expert/NWay.html
203 /* Work-around for Kendin chip bugs. */
204 #ifndef CONFIG_SUNDANCE_MMIO
208 static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl
) = {
209 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
210 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
211 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
212 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
213 { 0x1186, 0x1002, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 4 },
214 { 0x13F0, 0x0201, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 5 },
215 { 0x13F0, 0x0200, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 6 },
218 MODULE_DEVICE_TABLE(pci
, sundance_pci_tbl
);
227 static const struct pci_id_info pci_id_tbl
[] __devinitdata
= {
228 {"D-Link DFE-550TX FAST Ethernet Adapter"},
229 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
230 {"D-Link DFE-580TX 4 port Server Adapter"},
231 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
232 {"D-Link DL10050-based FAST Ethernet Adapter"},
233 {"Sundance Technology Alta"},
234 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
235 { } /* terminate list. */
238 /* This driver was written to use PCI memory space, however x86-oriented
239 hardware often uses I/O space accesses. */
241 /* Offsets to the device registers.
242 Unlike software-only systems, device drivers interact with complex hardware.
243 It's not useful to define symbolic names for every register bit in the
244 device. The name can only partially document the semantics and make
245 the driver longer and more difficult to read.
246 In general, only the important configuration values or bits changed
247 multiple times should be defined symbolically.
252 TxDMABurstThresh
= 0x08,
253 TxDMAUrgentThresh
= 0x09,
254 TxDMAPollPeriod
= 0x0a,
259 RxDMABurstThresh
= 0x14,
260 RxDMAUrgentThresh
= 0x15,
261 RxDMAPollPeriod
= 0x16,
280 MulticastFilter0
= 0x60,
281 MulticastFilter1
= 0x64,
288 StatsCarrierError
= 0x74,
289 StatsLateColl
= 0x75,
290 StatsMultiColl
= 0x76,
294 StatsTxXSDefer
= 0x7a,
300 /* Aliased and bogus values! */
303 enum ASICCtrl_HiWord_bit
{
304 GlobalReset
= 0x0001,
309 NetworkReset
= 0x0020,
314 /* Bits in the interrupt status/mask registers. */
315 enum intr_status_bits
{
316 IntrSummary
=0x0001, IntrPCIErr
=0x0002, IntrMACCtrl
=0x0008,
317 IntrTxDone
=0x0004, IntrRxDone
=0x0010, IntrRxStart
=0x0020,
319 StatsMax
=0x0080, LinkChange
=0x0100,
320 IntrTxDMADone
=0x0200, IntrRxDMADone
=0x0400,
323 /* Bits in the RxMode register. */
325 AcceptAllIPMulti
=0x20, AcceptMultiHash
=0x10, AcceptAll
=0x08,
326 AcceptBroadcast
=0x04, AcceptMulticast
=0x02, AcceptMyPhys
=0x01,
328 /* Bits in MACCtrl. */
329 enum mac_ctrl0_bits
{
330 EnbFullDuplex
=0x20, EnbRcvLargeFrame
=0x40,
331 EnbFlowCtrl
=0x100, EnbPassRxCRC
=0x200,
333 enum mac_ctrl1_bits
{
334 StatsEnable
=0x0020, StatsDisable
=0x0040, StatsEnabled
=0x0080,
335 TxEnable
=0x0100, TxDisable
=0x0200, TxEnabled
=0x0400,
336 RxEnable
=0x0800, RxDisable
=0x1000, RxEnabled
=0x2000,
339 /* The Rx and Tx buffer descriptors. */
340 /* Note that using only 32 bit fields simplifies conversion to big-endian
345 struct desc_frag
{ __le32 addr
, length
; } frag
[1];
348 /* Bits in netdev_desc.status */
349 enum desc_status_bits
{
351 DescEndPacket
=0x4000,
355 DescIntrOnDMADone
=0x80000000,
356 DisableAlign
= 0x00000001,
359 #define PRIV_ALIGN 15 /* Required alignment mask */
360 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
361 within the structure. */
363 struct netdev_private
{
364 /* Descriptor rings first for alignment. */
365 struct netdev_desc
*rx_ring
;
366 struct netdev_desc
*tx_ring
;
367 struct sk_buff
* rx_skbuff
[RX_RING_SIZE
];
368 struct sk_buff
* tx_skbuff
[TX_RING_SIZE
];
369 dma_addr_t tx_ring_dma
;
370 dma_addr_t rx_ring_dma
;
371 struct timer_list timer
; /* Media monitoring timer. */
372 /* Frequently used values: keep some adjacent for cache effect. */
374 spinlock_t rx_lock
; /* Group with Tx control cache line. */
377 unsigned int cur_rx
, dirty_rx
; /* Producer/consumer ring indices */
378 unsigned int rx_buf_sz
; /* Based on MTU+slack. */
379 struct netdev_desc
*last_tx
; /* Last Tx descriptor used. */
380 unsigned int cur_tx
, dirty_tx
;
381 /* These values keep track of the transceiver/media in use. */
382 unsigned int flowctrl
:1;
383 unsigned int default_port
:4; /* Last dev->if_port value. */
384 unsigned int an_enable
:1;
386 struct tasklet_struct rx_tasklet
;
387 struct tasklet_struct tx_tasklet
;
390 /* Multicast and receive mode. */
391 spinlock_t mcastlock
; /* SMP lock multicast updates. */
393 /* MII transceiver section. */
394 struct mii_if_info mii_if
;
395 int mii_preamble_required
;
396 unsigned char phys
[MII_CNT
]; /* MII device addresses, only first one used. */
397 struct pci_dev
*pci_dev
;
401 /* The station address location in the EEPROM. */
402 #define EEPROM_SA_OFFSET 0x10
403 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
404 IntrDrvRqst | IntrTxDone | StatsMax | \
407 static int change_mtu(struct net_device
*dev
, int new_mtu
);
408 static int eeprom_read(void __iomem
*ioaddr
, int location
);
409 static int mdio_read(struct net_device
*dev
, int phy_id
, int location
);
410 static void mdio_write(struct net_device
*dev
, int phy_id
, int location
, int value
);
411 static int mdio_wait_link(struct net_device
*dev
, int wait
);
412 static int netdev_open(struct net_device
*dev
);
413 static void check_duplex(struct net_device
*dev
);
414 static void netdev_timer(unsigned long data
);
415 static void tx_timeout(struct net_device
*dev
);
416 static void init_ring(struct net_device
*dev
);
417 static netdev_tx_t
start_tx(struct sk_buff
*skb
, struct net_device
*dev
);
418 static int reset_tx (struct net_device
*dev
);
419 static irqreturn_t
intr_handler(int irq
, void *dev_instance
);
420 static void rx_poll(unsigned long data
);
421 static void tx_poll(unsigned long data
);
422 static void refill_rx (struct net_device
*dev
);
423 static void netdev_error(struct net_device
*dev
, int intr_status
);
424 static void netdev_error(struct net_device
*dev
, int intr_status
);
425 static void set_rx_mode(struct net_device
*dev
);
426 static int __set_mac_addr(struct net_device
*dev
);
427 static struct net_device_stats
*get_stats(struct net_device
*dev
);
428 static int netdev_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
);
429 static int netdev_close(struct net_device
*dev
);
430 static const struct ethtool_ops ethtool_ops
;
432 static void sundance_reset(struct net_device
*dev
, unsigned long reset_cmd
)
434 struct netdev_private
*np
= netdev_priv(dev
);
435 void __iomem
*ioaddr
= np
->base
+ ASICCtrl
;
438 /* ST201 documentation states ASICCtrl is a 32bit register */
439 iowrite32 (reset_cmd
| ioread32 (ioaddr
), ioaddr
);
440 /* ST201 documentation states reset can take up to 1 ms */
442 while (ioread32 (ioaddr
) & (ResetBusy
<< 16)) {
443 if (--countdown
== 0) {
444 printk(KERN_WARNING
"%s : reset not completed !!\n", dev
->name
);
451 static const struct net_device_ops netdev_ops
= {
452 .ndo_open
= netdev_open
,
453 .ndo_stop
= netdev_close
,
454 .ndo_start_xmit
= start_tx
,
455 .ndo_get_stats
= get_stats
,
456 .ndo_set_multicast_list
= set_rx_mode
,
457 .ndo_do_ioctl
= netdev_ioctl
,
458 .ndo_tx_timeout
= tx_timeout
,
459 .ndo_change_mtu
= change_mtu
,
460 .ndo_set_mac_address
= eth_mac_addr
,
461 .ndo_validate_addr
= eth_validate_addr
,
464 static int __devinit
sundance_probe1 (struct pci_dev
*pdev
,
465 const struct pci_device_id
*ent
)
467 struct net_device
*dev
;
468 struct netdev_private
*np
;
470 int chip_idx
= ent
->driver_data
;
473 void __iomem
*ioaddr
;
482 int phy
, phy_end
, phy_idx
= 0;
484 /* when built into the kernel, we only print version if device is found */
486 static int printed_version
;
487 if (!printed_version
++)
491 if (pci_enable_device(pdev
))
493 pci_set_master(pdev
);
497 dev
= alloc_etherdev(sizeof(*np
));
500 SET_NETDEV_DEV(dev
, &pdev
->dev
);
502 if (pci_request_regions(pdev
, DRV_NAME
))
505 ioaddr
= pci_iomap(pdev
, bar
, netdev_io_size
);
509 for (i
= 0; i
< 3; i
++)
510 ((__le16
*)dev
->dev_addr
)[i
] =
511 cpu_to_le16(eeprom_read(ioaddr
, i
+ EEPROM_SA_OFFSET
));
512 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
514 dev
->base_addr
= (unsigned long)ioaddr
;
517 np
= netdev_priv(dev
);
520 np
->chip_id
= chip_idx
;
521 np
->msg_enable
= (1 << debug
) - 1;
522 spin_lock_init(&np
->lock
);
523 tasklet_init(&np
->rx_tasklet
, rx_poll
, (unsigned long)dev
);
524 tasklet_init(&np
->tx_tasklet
, tx_poll
, (unsigned long)dev
);
526 ring_space
= pci_alloc_consistent(pdev
, TX_TOTAL_SIZE
, &ring_dma
);
528 goto err_out_cleardev
;
529 np
->tx_ring
= (struct netdev_desc
*)ring_space
;
530 np
->tx_ring_dma
= ring_dma
;
532 ring_space
= pci_alloc_consistent(pdev
, RX_TOTAL_SIZE
, &ring_dma
);
534 goto err_out_unmap_tx
;
535 np
->rx_ring
= (struct netdev_desc
*)ring_space
;
536 np
->rx_ring_dma
= ring_dma
;
538 np
->mii_if
.dev
= dev
;
539 np
->mii_if
.mdio_read
= mdio_read
;
540 np
->mii_if
.mdio_write
= mdio_write
;
541 np
->mii_if
.phy_id_mask
= 0x1f;
542 np
->mii_if
.reg_num_mask
= 0x1f;
544 /* The chip-specific entries in the device structure. */
545 dev
->netdev_ops
= &netdev_ops
;
546 SET_ETHTOOL_OPS(dev
, ðtool_ops
);
547 dev
->watchdog_timeo
= TX_TIMEOUT
;
549 pci_set_drvdata(pdev
, dev
);
551 i
= register_netdev(dev
);
553 goto err_out_unmap_rx
;
555 printk(KERN_INFO
"%s: %s at %p, %pM, IRQ %d.\n",
556 dev
->name
, pci_id_tbl
[chip_idx
].name
, ioaddr
,
559 np
->phys
[0] = 1; /* Default setting */
560 np
->mii_preamble_required
++;
563 * It seems some PHYs don't deal well with address 0 being accessed
566 if (sundance_pci_tbl
[np
->chip_id
].device
== 0x0200) {
571 phy_end
= 32; /* wraps to zero, due to 'phy & 0x1f' */
573 for (; phy
<= phy_end
&& phy_idx
< MII_CNT
; phy
++) {
574 int phyx
= phy
& 0x1f;
575 int mii_status
= mdio_read(dev
, phyx
, MII_BMSR
);
576 if (mii_status
!= 0xffff && mii_status
!= 0x0000) {
577 np
->phys
[phy_idx
++] = phyx
;
578 np
->mii_if
.advertising
= mdio_read(dev
, phyx
, MII_ADVERTISE
);
579 if ((mii_status
& 0x0040) == 0)
580 np
->mii_preamble_required
++;
581 printk(KERN_INFO
"%s: MII PHY found at address %d, status "
582 "0x%4.4x advertising %4.4x.\n",
583 dev
->name
, phyx
, mii_status
, np
->mii_if
.advertising
);
586 np
->mii_preamble_required
--;
589 printk(KERN_INFO
"%s: No MII transceiver found, aborting. ASIC status %x\n",
590 dev
->name
, ioread32(ioaddr
+ ASICCtrl
));
591 goto err_out_unregister
;
594 np
->mii_if
.phy_id
= np
->phys
[0];
596 /* Parse override configuration */
598 if (card_idx
< MAX_UNITS
) {
599 if (media
[card_idx
] != NULL
) {
601 if (strcmp (media
[card_idx
], "100mbps_fd") == 0 ||
602 strcmp (media
[card_idx
], "4") == 0) {
604 np
->mii_if
.full_duplex
= 1;
605 } else if (strcmp (media
[card_idx
], "100mbps_hd") == 0 ||
606 strcmp (media
[card_idx
], "3") == 0) {
608 np
->mii_if
.full_duplex
= 0;
609 } else if (strcmp (media
[card_idx
], "10mbps_fd") == 0 ||
610 strcmp (media
[card_idx
], "2") == 0) {
612 np
->mii_if
.full_duplex
= 1;
613 } else if (strcmp (media
[card_idx
], "10mbps_hd") == 0 ||
614 strcmp (media
[card_idx
], "1") == 0) {
616 np
->mii_if
.full_duplex
= 0;
626 if (ioread32 (ioaddr
+ ASICCtrl
) & 0x80) {
627 /* Default 100Mbps Full */
630 np
->mii_if
.full_duplex
= 1;
635 mdio_write (dev
, np
->phys
[0], MII_BMCR
, BMCR_RESET
);
637 /* If flow control enabled, we need to advertise it.*/
639 mdio_write (dev
, np
->phys
[0], MII_ADVERTISE
, np
->mii_if
.advertising
| 0x0400);
640 mdio_write (dev
, np
->phys
[0], MII_BMCR
, BMCR_ANENABLE
|BMCR_ANRESTART
);
641 /* Force media type */
642 if (!np
->an_enable
) {
644 mii_ctl
|= (np
->speed
== 100) ? BMCR_SPEED100
: 0;
645 mii_ctl
|= (np
->mii_if
.full_duplex
) ? BMCR_FULLDPLX
: 0;
646 mdio_write (dev
, np
->phys
[0], MII_BMCR
, mii_ctl
);
647 printk (KERN_INFO
"Override speed=%d, %s duplex\n",
648 np
->speed
, np
->mii_if
.full_duplex
? "Full" : "Half");
652 /* Perhaps move the reset here? */
653 /* Reset the chip to erase previous misconfiguration. */
654 if (netif_msg_hw(np
))
655 printk("ASIC Control is %x.\n", ioread32(ioaddr
+ ASICCtrl
));
656 sundance_reset(dev
, 0x00ff << 16);
657 if (netif_msg_hw(np
))
658 printk("ASIC Control is now %x.\n", ioread32(ioaddr
+ ASICCtrl
));
664 unregister_netdev(dev
);
666 pci_free_consistent(pdev
, RX_TOTAL_SIZE
, np
->rx_ring
, np
->rx_ring_dma
);
668 pci_free_consistent(pdev
, TX_TOTAL_SIZE
, np
->tx_ring
, np
->tx_ring_dma
);
670 pci_set_drvdata(pdev
, NULL
);
671 pci_iounmap(pdev
, ioaddr
);
673 pci_release_regions(pdev
);
679 static int change_mtu(struct net_device
*dev
, int new_mtu
)
681 if ((new_mtu
< 68) || (new_mtu
> 8191)) /* Set by RxDMAFrameLen */
683 if (netif_running(dev
))
689 #define eeprom_delay(ee_addr) ioread32(ee_addr)
690 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
691 static int __devinit
eeprom_read(void __iomem
*ioaddr
, int location
)
693 int boguscnt
= 10000; /* Typical 1900 ticks. */
694 iowrite16(0x0200 | (location
& 0xff), ioaddr
+ EECtrl
);
696 eeprom_delay(ioaddr
+ EECtrl
);
697 if (! (ioread16(ioaddr
+ EECtrl
) & 0x8000)) {
698 return ioread16(ioaddr
+ EEData
);
700 } while (--boguscnt
> 0);
704 /* MII transceiver control section.
705 Read and write the MII registers using software-generated serial
706 MDIO protocol. See the MII specifications or DP83840A data sheet
709 The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
710 met by back-to-back 33Mhz PCI cycles. */
711 #define mdio_delay() ioread8(mdio_addr)
714 MDIO_ShiftClk
=0x0001, MDIO_Data
=0x0002, MDIO_EnbOutput
=0x0004,
716 #define MDIO_EnbIn (0)
717 #define MDIO_WRITE0 (MDIO_EnbOutput)
718 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
720 /* Generate the preamble required for initial synchronization and
721 a few older transceivers. */
722 static void mdio_sync(void __iomem
*mdio_addr
)
726 /* Establish sync by sending at least 32 logic ones. */
727 while (--bits
>= 0) {
728 iowrite8(MDIO_WRITE1
, mdio_addr
);
730 iowrite8(MDIO_WRITE1
| MDIO_ShiftClk
, mdio_addr
);
735 static int mdio_read(struct net_device
*dev
, int phy_id
, int location
)
737 struct netdev_private
*np
= netdev_priv(dev
);
738 void __iomem
*mdio_addr
= np
->base
+ MIICtrl
;
739 int mii_cmd
= (0xf6 << 10) | (phy_id
<< 5) | location
;
742 if (np
->mii_preamble_required
)
743 mdio_sync(mdio_addr
);
745 /* Shift the read command bits out. */
746 for (i
= 15; i
>= 0; i
--) {
747 int dataval
= (mii_cmd
& (1 << i
)) ? MDIO_WRITE1
: MDIO_WRITE0
;
749 iowrite8(dataval
, mdio_addr
);
751 iowrite8(dataval
| MDIO_ShiftClk
, mdio_addr
);
754 /* Read the two transition, 16 data, and wire-idle bits. */
755 for (i
= 19; i
> 0; i
--) {
756 iowrite8(MDIO_EnbIn
, mdio_addr
);
758 retval
= (retval
<< 1) | ((ioread8(mdio_addr
) & MDIO_Data
) ? 1 : 0);
759 iowrite8(MDIO_EnbIn
| MDIO_ShiftClk
, mdio_addr
);
762 return (retval
>>1) & 0xffff;
765 static void mdio_write(struct net_device
*dev
, int phy_id
, int location
, int value
)
767 struct netdev_private
*np
= netdev_priv(dev
);
768 void __iomem
*mdio_addr
= np
->base
+ MIICtrl
;
769 int mii_cmd
= (0x5002 << 16) | (phy_id
<< 23) | (location
<<18) | value
;
772 if (np
->mii_preamble_required
)
773 mdio_sync(mdio_addr
);
775 /* Shift the command bits out. */
776 for (i
= 31; i
>= 0; i
--) {
777 int dataval
= (mii_cmd
& (1 << i
)) ? MDIO_WRITE1
: MDIO_WRITE0
;
779 iowrite8(dataval
, mdio_addr
);
781 iowrite8(dataval
| MDIO_ShiftClk
, mdio_addr
);
784 /* Clear out extra bits. */
785 for (i
= 2; i
> 0; i
--) {
786 iowrite8(MDIO_EnbIn
, mdio_addr
);
788 iowrite8(MDIO_EnbIn
| MDIO_ShiftClk
, mdio_addr
);
794 static int mdio_wait_link(struct net_device
*dev
, int wait
)
798 struct netdev_private
*np
;
800 np
= netdev_priv(dev
);
801 phy_id
= np
->phys
[0];
804 bmsr
= mdio_read(dev
, phy_id
, MII_BMSR
);
808 } while (--wait
> 0);
812 static int netdev_open(struct net_device
*dev
)
814 struct netdev_private
*np
= netdev_priv(dev
);
815 void __iomem
*ioaddr
= np
->base
;
819 /* Do we need to reset the chip??? */
821 i
= request_irq(dev
->irq
, intr_handler
, IRQF_SHARED
, dev
->name
, dev
);
825 if (netif_msg_ifup(np
))
826 printk(KERN_DEBUG
"%s: netdev_open() irq %d.\n",
827 dev
->name
, dev
->irq
);
830 iowrite32(np
->rx_ring_dma
, ioaddr
+ RxListPtr
);
831 /* The Tx list pointer is written as packets are queued. */
833 /* Initialize other registers. */
835 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
836 iowrite16(dev
->mtu
+ 18, ioaddr
+ MaxFrameSize
);
838 iowrite16(dev
->mtu
+ 14, ioaddr
+ MaxFrameSize
);
841 iowrite32(ioread32(ioaddr
+ ASICCtrl
) | 0x0C, ioaddr
+ ASICCtrl
);
843 /* Configure the PCI bus bursts and FIFO thresholds. */
845 if (dev
->if_port
== 0)
846 dev
->if_port
= np
->default_port
;
848 spin_lock_init(&np
->mcastlock
);
851 iowrite16(0, ioaddr
+ IntrEnable
);
852 iowrite16(0, ioaddr
+ DownCounter
);
853 /* Set the chip to poll every N*320nsec. */
854 iowrite8(100, ioaddr
+ RxDMAPollPeriod
);
855 iowrite8(127, ioaddr
+ TxDMAPollPeriod
);
856 /* Fix DFE-580TX packet drop issue */
857 if (np
->pci_dev
->revision
>= 0x14)
858 iowrite8(0x01, ioaddr
+ DebugCtrl1
);
859 netif_start_queue(dev
);
861 spin_lock_irqsave(&np
->lock
, flags
);
863 spin_unlock_irqrestore(&np
->lock
, flags
);
865 iowrite16 (StatsEnable
| RxEnable
| TxEnable
, ioaddr
+ MACCtrl1
);
867 if (netif_msg_ifup(np
))
868 printk(KERN_DEBUG
"%s: Done netdev_open(), status: Rx %x Tx %x "
869 "MAC Control %x, %4.4x %4.4x.\n",
870 dev
->name
, ioread32(ioaddr
+ RxStatus
), ioread8(ioaddr
+ TxStatus
),
871 ioread32(ioaddr
+ MACCtrl0
),
872 ioread16(ioaddr
+ MACCtrl1
), ioread16(ioaddr
+ MACCtrl0
));
874 /* Set the timer to check for link beat. */
875 init_timer(&np
->timer
);
876 np
->timer
.expires
= jiffies
+ 3*HZ
;
877 np
->timer
.data
= (unsigned long)dev
;
878 np
->timer
.function
= &netdev_timer
; /* timer handler */
879 add_timer(&np
->timer
);
881 /* Enable interrupts by setting the interrupt mask. */
882 iowrite16(DEFAULT_INTR
, ioaddr
+ IntrEnable
);
887 static void check_duplex(struct net_device
*dev
)
889 struct netdev_private
*np
= netdev_priv(dev
);
890 void __iomem
*ioaddr
= np
->base
;
891 int mii_lpa
= mdio_read(dev
, np
->phys
[0], MII_LPA
);
892 int negotiated
= mii_lpa
& np
->mii_if
.advertising
;
896 if (!np
->an_enable
|| mii_lpa
== 0xffff) {
897 if (np
->mii_if
.full_duplex
)
898 iowrite16 (ioread16 (ioaddr
+ MACCtrl0
) | EnbFullDuplex
,
903 /* Autonegotiation */
904 duplex
= (negotiated
& 0x0100) || (negotiated
& 0x01C0) == 0x0040;
905 if (np
->mii_if
.full_duplex
!= duplex
) {
906 np
->mii_if
.full_duplex
= duplex
;
907 if (netif_msg_link(np
))
908 printk(KERN_INFO
"%s: Setting %s-duplex based on MII #%d "
909 "negotiated capability %4.4x.\n", dev
->name
,
910 duplex
? "full" : "half", np
->phys
[0], negotiated
);
911 iowrite16(ioread16(ioaddr
+ MACCtrl0
) | (duplex
? 0x20 : 0), ioaddr
+ MACCtrl0
);
915 static void netdev_timer(unsigned long data
)
917 struct net_device
*dev
= (struct net_device
*)data
;
918 struct netdev_private
*np
= netdev_priv(dev
);
919 void __iomem
*ioaddr
= np
->base
;
920 int next_tick
= 10*HZ
;
922 if (netif_msg_timer(np
)) {
923 printk(KERN_DEBUG
"%s: Media selection timer tick, intr status %4.4x, "
925 dev
->name
, ioread16(ioaddr
+ IntrEnable
),
926 ioread8(ioaddr
+ TxStatus
), ioread32(ioaddr
+ RxStatus
));
929 np
->timer
.expires
= jiffies
+ next_tick
;
930 add_timer(&np
->timer
);
933 static void tx_timeout(struct net_device
*dev
)
935 struct netdev_private
*np
= netdev_priv(dev
);
936 void __iomem
*ioaddr
= np
->base
;
939 netif_stop_queue(dev
);
940 tasklet_disable(&np
->tx_tasklet
);
941 iowrite16(0, ioaddr
+ IntrEnable
);
942 printk(KERN_WARNING
"%s: Transmit timed out, TxStatus %2.2x "
944 " resetting...\n", dev
->name
, ioread8(ioaddr
+ TxStatus
),
945 ioread8(ioaddr
+ TxFrameId
));
949 for (i
=0; i
<TX_RING_SIZE
; i
++) {
950 printk(KERN_DEBUG
"%02x %08llx %08x %08x(%02x) %08x %08x\n", i
,
951 (unsigned long long)(np
->tx_ring_dma
+ i
*sizeof(*np
->tx_ring
)),
952 le32_to_cpu(np
->tx_ring
[i
].next_desc
),
953 le32_to_cpu(np
->tx_ring
[i
].status
),
954 (le32_to_cpu(np
->tx_ring
[i
].status
) >> 2) & 0xff,
955 le32_to_cpu(np
->tx_ring
[i
].frag
[0].addr
),
956 le32_to_cpu(np
->tx_ring
[i
].frag
[0].length
));
958 printk(KERN_DEBUG
"TxListPtr=%08x netif_queue_stopped=%d\n",
959 ioread32(np
->base
+ TxListPtr
),
960 netif_queue_stopped(dev
));
961 printk(KERN_DEBUG
"cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
962 np
->cur_tx
, np
->cur_tx
% TX_RING_SIZE
,
963 np
->dirty_tx
, np
->dirty_tx
% TX_RING_SIZE
);
964 printk(KERN_DEBUG
"cur_rx=%d dirty_rx=%d\n", np
->cur_rx
, np
->dirty_rx
);
965 printk(KERN_DEBUG
"cur_task=%d\n", np
->cur_task
);
967 spin_lock_irqsave(&np
->lock
, flag
);
969 /* Stop and restart the chip's Tx processes . */
971 spin_unlock_irqrestore(&np
->lock
, flag
);
975 dev
->trans_start
= jiffies
; /* prevent tx timeout */
976 dev
->stats
.tx_errors
++;
977 if (np
->cur_tx
- np
->dirty_tx
< TX_QUEUE_LEN
- 4) {
978 netif_wake_queue(dev
);
980 iowrite16(DEFAULT_INTR
, ioaddr
+ IntrEnable
);
981 tasklet_enable(&np
->tx_tasklet
);
985 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
986 static void init_ring(struct net_device
*dev
)
988 struct netdev_private
*np
= netdev_priv(dev
);
991 np
->cur_rx
= np
->cur_tx
= 0;
992 np
->dirty_rx
= np
->dirty_tx
= 0;
995 np
->rx_buf_sz
= (dev
->mtu
<= 1520 ? PKT_BUF_SZ
: dev
->mtu
+ 16);
997 /* Initialize all Rx descriptors. */
998 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
999 np
->rx_ring
[i
].next_desc
= cpu_to_le32(np
->rx_ring_dma
+
1000 ((i
+1)%RX_RING_SIZE
)*sizeof(*np
->rx_ring
));
1001 np
->rx_ring
[i
].status
= 0;
1002 np
->rx_ring
[i
].frag
[0].length
= 0;
1003 np
->rx_skbuff
[i
] = NULL
;
1006 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1007 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
1008 struct sk_buff
*skb
= dev_alloc_skb(np
->rx_buf_sz
);
1009 np
->rx_skbuff
[i
] = skb
;
1012 skb
->dev
= dev
; /* Mark as being used by this device. */
1013 skb_reserve(skb
, 2); /* 16 byte align the IP header. */
1014 np
->rx_ring
[i
].frag
[0].addr
= cpu_to_le32(
1015 pci_map_single(np
->pci_dev
, skb
->data
, np
->rx_buf_sz
,
1016 PCI_DMA_FROMDEVICE
));
1017 np
->rx_ring
[i
].frag
[0].length
= cpu_to_le32(np
->rx_buf_sz
| LastFrag
);
1019 np
->dirty_rx
= (unsigned int)(i
- RX_RING_SIZE
);
1021 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
1022 np
->tx_skbuff
[i
] = NULL
;
1023 np
->tx_ring
[i
].status
= 0;
1028 static void tx_poll (unsigned long data
)
1030 struct net_device
*dev
= (struct net_device
*)data
;
1031 struct netdev_private
*np
= netdev_priv(dev
);
1032 unsigned head
= np
->cur_task
% TX_RING_SIZE
;
1033 struct netdev_desc
*txdesc
=
1034 &np
->tx_ring
[(np
->cur_tx
- 1) % TX_RING_SIZE
];
1036 /* Chain the next pointer */
1037 for (; np
->cur_tx
- np
->cur_task
> 0; np
->cur_task
++) {
1038 int entry
= np
->cur_task
% TX_RING_SIZE
;
1039 txdesc
= &np
->tx_ring
[entry
];
1041 np
->last_tx
->next_desc
= cpu_to_le32(np
->tx_ring_dma
+
1042 entry
*sizeof(struct netdev_desc
));
1044 np
->last_tx
= txdesc
;
1046 /* Indicate the latest descriptor of tx ring */
1047 txdesc
->status
|= cpu_to_le32(DescIntrOnTx
);
1049 if (ioread32 (np
->base
+ TxListPtr
) == 0)
1050 iowrite32 (np
->tx_ring_dma
+ head
* sizeof(struct netdev_desc
),
1051 np
->base
+ TxListPtr
);
1056 start_tx (struct sk_buff
*skb
, struct net_device
*dev
)
1058 struct netdev_private
*np
= netdev_priv(dev
);
1059 struct netdev_desc
*txdesc
;
1062 /* Calculate the next Tx descriptor entry. */
1063 entry
= np
->cur_tx
% TX_RING_SIZE
;
1064 np
->tx_skbuff
[entry
] = skb
;
1065 txdesc
= &np
->tx_ring
[entry
];
1067 txdesc
->next_desc
= 0;
1068 txdesc
->status
= cpu_to_le32 ((entry
<< 2) | DisableAlign
);
1069 txdesc
->frag
[0].addr
= cpu_to_le32 (pci_map_single (np
->pci_dev
, skb
->data
,
1072 txdesc
->frag
[0].length
= cpu_to_le32 (skb
->len
| LastFrag
);
1074 /* Increment cur_tx before tasklet_schedule() */
1077 /* Schedule a tx_poll() task */
1078 tasklet_schedule(&np
->tx_tasklet
);
1080 /* On some architectures: explicitly flush cache lines here. */
1081 if (np
->cur_tx
- np
->dirty_tx
< TX_QUEUE_LEN
- 1 &&
1082 !netif_queue_stopped(dev
)) {
1085 netif_stop_queue (dev
);
1087 if (netif_msg_tx_queued(np
)) {
1089 "%s: Transmit frame #%d queued in slot %d.\n",
1090 dev
->name
, np
->cur_tx
, entry
);
1092 return NETDEV_TX_OK
;
1095 /* Reset hardware tx and free all of tx buffers */
1097 reset_tx (struct net_device
*dev
)
1099 struct netdev_private
*np
= netdev_priv(dev
);
1100 void __iomem
*ioaddr
= np
->base
;
1101 struct sk_buff
*skb
;
1103 int irq
= in_interrupt();
1105 /* Reset tx logic, TxListPtr will be cleaned */
1106 iowrite16 (TxDisable
, ioaddr
+ MACCtrl1
);
1107 sundance_reset(dev
, (NetworkReset
|FIFOReset
|DMAReset
|TxReset
) << 16);
1109 /* free all tx skbuff */
1110 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
1111 np
->tx_ring
[i
].next_desc
= 0;
1113 skb
= np
->tx_skbuff
[i
];
1115 pci_unmap_single(np
->pci_dev
,
1116 le32_to_cpu(np
->tx_ring
[i
].frag
[0].addr
),
1117 skb
->len
, PCI_DMA_TODEVICE
);
1119 dev_kfree_skb_irq (skb
);
1121 dev_kfree_skb (skb
);
1122 np
->tx_skbuff
[i
] = NULL
;
1123 dev
->stats
.tx_dropped
++;
1126 np
->cur_tx
= np
->dirty_tx
= 0;
1130 iowrite8(127, ioaddr
+ TxDMAPollPeriod
);
1132 iowrite16 (StatsEnable
| RxEnable
| TxEnable
, ioaddr
+ MACCtrl1
);
1136 /* The interrupt handler cleans up after the Tx thread,
1137 and schedule a Rx thread work */
1138 static irqreturn_t
intr_handler(int irq
, void *dev_instance
)
1140 struct net_device
*dev
= (struct net_device
*)dev_instance
;
1141 struct netdev_private
*np
= netdev_priv(dev
);
1142 void __iomem
*ioaddr
= np
->base
;
1151 int intr_status
= ioread16(ioaddr
+ IntrStatus
);
1152 iowrite16(intr_status
, ioaddr
+ IntrStatus
);
1154 if (netif_msg_intr(np
))
1155 printk(KERN_DEBUG
"%s: Interrupt, status %4.4x.\n",
1156 dev
->name
, intr_status
);
1158 if (!(intr_status
& DEFAULT_INTR
))
1163 if (intr_status
& (IntrRxDMADone
)) {
1164 iowrite16(DEFAULT_INTR
& ~(IntrRxDone
|IntrRxDMADone
),
1165 ioaddr
+ IntrEnable
);
1167 np
->budget
= RX_BUDGET
;
1168 tasklet_schedule(&np
->rx_tasklet
);
1170 if (intr_status
& (IntrTxDone
| IntrDrvRqst
)) {
1171 tx_status
= ioread16 (ioaddr
+ TxStatus
);
1172 for (tx_cnt
=32; tx_status
& 0x80; --tx_cnt
) {
1173 if (netif_msg_tx_done(np
))
1175 ("%s: Transmit status is %2.2x.\n",
1176 dev
->name
, tx_status
);
1177 if (tx_status
& 0x1e) {
1178 if (netif_msg_tx_err(np
))
1179 printk("%s: Transmit error status %4.4x.\n",
1180 dev
->name
, tx_status
);
1181 dev
->stats
.tx_errors
++;
1182 if (tx_status
& 0x10)
1183 dev
->stats
.tx_fifo_errors
++;
1184 if (tx_status
& 0x08)
1185 dev
->stats
.collisions
++;
1186 if (tx_status
& 0x04)
1187 dev
->stats
.tx_fifo_errors
++;
1188 if (tx_status
& 0x02)
1189 dev
->stats
.tx_window_errors
++;
1192 ** This reset has been verified on
1193 ** DFE-580TX boards ! phdm@macqel.be.
1195 if (tx_status
& 0x10) { /* TxUnderrun */
1196 /* Restart Tx FIFO and transmitter */
1197 sundance_reset(dev
, (NetworkReset
|FIFOReset
|TxReset
) << 16);
1198 /* No need to reset the Tx pointer here */
1200 /* Restart the Tx. Need to make sure tx enabled */
1203 iowrite16(ioread16(ioaddr
+ MACCtrl1
) | TxEnable
, ioaddr
+ MACCtrl1
);
1204 if (ioread16(ioaddr
+ MACCtrl1
) & TxEnabled
)
1209 /* Yup, this is a documentation bug. It cost me *hours*. */
1210 iowrite16 (0, ioaddr
+ TxStatus
);
1212 iowrite32(5000, ioaddr
+ DownCounter
);
1215 tx_status
= ioread16 (ioaddr
+ TxStatus
);
1217 hw_frame_id
= (tx_status
>> 8) & 0xff;
1219 hw_frame_id
= ioread8(ioaddr
+ TxFrameId
);
1222 if (np
->pci_dev
->revision
>= 0x14) {
1223 spin_lock(&np
->lock
);
1224 for (; np
->cur_tx
- np
->dirty_tx
> 0; np
->dirty_tx
++) {
1225 int entry
= np
->dirty_tx
% TX_RING_SIZE
;
1226 struct sk_buff
*skb
;
1228 sw_frame_id
= (le32_to_cpu(
1229 np
->tx_ring
[entry
].status
) >> 2) & 0xff;
1230 if (sw_frame_id
== hw_frame_id
&&
1231 !(le32_to_cpu(np
->tx_ring
[entry
].status
)
1234 if (sw_frame_id
== (hw_frame_id
+ 1) %
1237 skb
= np
->tx_skbuff
[entry
];
1238 /* Free the original skb. */
1239 pci_unmap_single(np
->pci_dev
,
1240 le32_to_cpu(np
->tx_ring
[entry
].frag
[0].addr
),
1241 skb
->len
, PCI_DMA_TODEVICE
);
1242 dev_kfree_skb_irq (np
->tx_skbuff
[entry
]);
1243 np
->tx_skbuff
[entry
] = NULL
;
1244 np
->tx_ring
[entry
].frag
[0].addr
= 0;
1245 np
->tx_ring
[entry
].frag
[0].length
= 0;
1247 spin_unlock(&np
->lock
);
1249 spin_lock(&np
->lock
);
1250 for (; np
->cur_tx
- np
->dirty_tx
> 0; np
->dirty_tx
++) {
1251 int entry
= np
->dirty_tx
% TX_RING_SIZE
;
1252 struct sk_buff
*skb
;
1253 if (!(le32_to_cpu(np
->tx_ring
[entry
].status
)
1256 skb
= np
->tx_skbuff
[entry
];
1257 /* Free the original skb. */
1258 pci_unmap_single(np
->pci_dev
,
1259 le32_to_cpu(np
->tx_ring
[entry
].frag
[0].addr
),
1260 skb
->len
, PCI_DMA_TODEVICE
);
1261 dev_kfree_skb_irq (np
->tx_skbuff
[entry
]);
1262 np
->tx_skbuff
[entry
] = NULL
;
1263 np
->tx_ring
[entry
].frag
[0].addr
= 0;
1264 np
->tx_ring
[entry
].frag
[0].length
= 0;
1266 spin_unlock(&np
->lock
);
1269 if (netif_queue_stopped(dev
) &&
1270 np
->cur_tx
- np
->dirty_tx
< TX_QUEUE_LEN
- 4) {
1271 /* The ring is no longer full, clear busy flag. */
1272 netif_wake_queue (dev
);
1274 /* Abnormal error summary/uncommon events handlers. */
1275 if (intr_status
& (IntrPCIErr
| LinkChange
| StatsMax
))
1276 netdev_error(dev
, intr_status
);
1278 if (netif_msg_intr(np
))
1279 printk(KERN_DEBUG
"%s: exiting interrupt, status=%#4.4x.\n",
1280 dev
->name
, ioread16(ioaddr
+ IntrStatus
));
1281 return IRQ_RETVAL(handled
);
1284 static void rx_poll(unsigned long data
)
1286 struct net_device
*dev
= (struct net_device
*)data
;
1287 struct netdev_private
*np
= netdev_priv(dev
);
1288 int entry
= np
->cur_rx
% RX_RING_SIZE
;
1289 int boguscnt
= np
->budget
;
1290 void __iomem
*ioaddr
= np
->base
;
1293 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1295 struct netdev_desc
*desc
= &(np
->rx_ring
[entry
]);
1296 u32 frame_status
= le32_to_cpu(desc
->status
);
1299 if (--boguscnt
< 0) {
1302 if (!(frame_status
& DescOwn
))
1304 pkt_len
= frame_status
& 0x1fff; /* Chip omits the CRC. */
1305 if (netif_msg_rx_status(np
))
1306 printk(KERN_DEBUG
" netdev_rx() status was %8.8x.\n",
1308 if (frame_status
& 0x001f4000) {
1309 /* There was a error. */
1310 if (netif_msg_rx_err(np
))
1311 printk(KERN_DEBUG
" netdev_rx() Rx error was %8.8x.\n",
1313 dev
->stats
.rx_errors
++;
1314 if (frame_status
& 0x00100000)
1315 dev
->stats
.rx_length_errors
++;
1316 if (frame_status
& 0x00010000)
1317 dev
->stats
.rx_fifo_errors
++;
1318 if (frame_status
& 0x00060000)
1319 dev
->stats
.rx_frame_errors
++;
1320 if (frame_status
& 0x00080000)
1321 dev
->stats
.rx_crc_errors
++;
1322 if (frame_status
& 0x00100000) {
1323 printk(KERN_WARNING
"%s: Oversized Ethernet frame,"
1325 dev
->name
, frame_status
);
1328 struct sk_buff
*skb
;
1329 #ifndef final_version
1330 if (netif_msg_rx_status(np
))
1331 printk(KERN_DEBUG
" netdev_rx() normal Rx pkt length %d"
1332 ", bogus_cnt %d.\n",
1335 /* Check if the packet is long enough to accept without copying
1336 to a minimally-sized skbuff. */
1337 if (pkt_len
< rx_copybreak
&&
1338 (skb
= dev_alloc_skb(pkt_len
+ 2)) != NULL
) {
1339 skb_reserve(skb
, 2); /* 16 byte align the IP header */
1340 pci_dma_sync_single_for_cpu(np
->pci_dev
,
1341 le32_to_cpu(desc
->frag
[0].addr
),
1343 PCI_DMA_FROMDEVICE
);
1345 skb_copy_to_linear_data(skb
, np
->rx_skbuff
[entry
]->data
, pkt_len
);
1346 pci_dma_sync_single_for_device(np
->pci_dev
,
1347 le32_to_cpu(desc
->frag
[0].addr
),
1349 PCI_DMA_FROMDEVICE
);
1350 skb_put(skb
, pkt_len
);
1352 pci_unmap_single(np
->pci_dev
,
1353 le32_to_cpu(desc
->frag
[0].addr
),
1355 PCI_DMA_FROMDEVICE
);
1356 skb_put(skb
= np
->rx_skbuff
[entry
], pkt_len
);
1357 np
->rx_skbuff
[entry
] = NULL
;
1359 skb
->protocol
= eth_type_trans(skb
, dev
);
1360 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1363 entry
= (entry
+ 1) % RX_RING_SIZE
;
1368 np
->budget
-= received
;
1369 iowrite16(DEFAULT_INTR
, ioaddr
+ IntrEnable
);
1377 np
->budget
-= received
;
1378 if (np
->budget
<= 0)
1379 np
->budget
= RX_BUDGET
;
1380 tasklet_schedule(&np
->rx_tasklet
);
1384 static void refill_rx (struct net_device
*dev
)
1386 struct netdev_private
*np
= netdev_priv(dev
);
1390 /* Refill the Rx ring buffers. */
1391 for (;(np
->cur_rx
- np
->dirty_rx
+ RX_RING_SIZE
) % RX_RING_SIZE
> 0;
1392 np
->dirty_rx
= (np
->dirty_rx
+ 1) % RX_RING_SIZE
) {
1393 struct sk_buff
*skb
;
1394 entry
= np
->dirty_rx
% RX_RING_SIZE
;
1395 if (np
->rx_skbuff
[entry
] == NULL
) {
1396 skb
= dev_alloc_skb(np
->rx_buf_sz
);
1397 np
->rx_skbuff
[entry
] = skb
;
1399 break; /* Better luck next round. */
1400 skb
->dev
= dev
; /* Mark as being used by this device. */
1401 skb_reserve(skb
, 2); /* Align IP on 16 byte boundaries */
1402 np
->rx_ring
[entry
].frag
[0].addr
= cpu_to_le32(
1403 pci_map_single(np
->pci_dev
, skb
->data
,
1404 np
->rx_buf_sz
, PCI_DMA_FROMDEVICE
));
1406 /* Perhaps we need not reset this field. */
1407 np
->rx_ring
[entry
].frag
[0].length
=
1408 cpu_to_le32(np
->rx_buf_sz
| LastFrag
);
1409 np
->rx_ring
[entry
].status
= 0;
1414 static void netdev_error(struct net_device
*dev
, int intr_status
)
1416 struct netdev_private
*np
= netdev_priv(dev
);
1417 void __iomem
*ioaddr
= np
->base
;
1418 u16 mii_ctl
, mii_advertise
, mii_lpa
;
1421 if (intr_status
& LinkChange
) {
1422 if (mdio_wait_link(dev
, 10) == 0) {
1423 printk(KERN_INFO
"%s: Link up\n", dev
->name
);
1424 if (np
->an_enable
) {
1425 mii_advertise
= mdio_read(dev
, np
->phys
[0],
1427 mii_lpa
= mdio_read(dev
, np
->phys
[0], MII_LPA
);
1428 mii_advertise
&= mii_lpa
;
1429 printk(KERN_INFO
"%s: Link changed: ",
1431 if (mii_advertise
& ADVERTISE_100FULL
) {
1433 printk("100Mbps, full duplex\n");
1434 } else if (mii_advertise
& ADVERTISE_100HALF
) {
1436 printk("100Mbps, half duplex\n");
1437 } else if (mii_advertise
& ADVERTISE_10FULL
) {
1439 printk("10Mbps, full duplex\n");
1440 } else if (mii_advertise
& ADVERTISE_10HALF
) {
1442 printk("10Mbps, half duplex\n");
1447 mii_ctl
= mdio_read(dev
, np
->phys
[0], MII_BMCR
);
1448 speed
= (mii_ctl
& BMCR_SPEED100
) ? 100 : 10;
1450 printk(KERN_INFO
"%s: Link changed: %dMbps ,",
1452 printk("%s duplex.\n",
1453 (mii_ctl
& BMCR_FULLDPLX
) ?
1457 if (np
->flowctrl
&& np
->mii_if
.full_duplex
) {
1458 iowrite16(ioread16(ioaddr
+ MulticastFilter1
+2) | 0x0200,
1459 ioaddr
+ MulticastFilter1
+2);
1460 iowrite16(ioread16(ioaddr
+ MACCtrl0
) | EnbFlowCtrl
,
1463 netif_carrier_on(dev
);
1465 printk(KERN_INFO
"%s: Link down\n", dev
->name
);
1466 netif_carrier_off(dev
);
1469 if (intr_status
& StatsMax
) {
1472 if (intr_status
& IntrPCIErr
) {
1473 printk(KERN_ERR
"%s: Something Wicked happened! %4.4x.\n",
1474 dev
->name
, intr_status
);
1475 /* We must do a global reset of DMA to continue. */
1479 static struct net_device_stats
*get_stats(struct net_device
*dev
)
1481 struct netdev_private
*np
= netdev_priv(dev
);
1482 void __iomem
*ioaddr
= np
->base
;
1485 /* We should lock this segment of code for SMP eventually, although
1486 the vulnerability window is very small and statistics are
1488 /* The chip only need report frame silently dropped. */
1489 dev
->stats
.rx_missed_errors
+= ioread8(ioaddr
+ RxMissed
);
1490 dev
->stats
.tx_packets
+= ioread16(ioaddr
+ TxFramesOK
);
1491 dev
->stats
.rx_packets
+= ioread16(ioaddr
+ RxFramesOK
);
1492 dev
->stats
.collisions
+= ioread8(ioaddr
+ StatsLateColl
);
1493 dev
->stats
.collisions
+= ioread8(ioaddr
+ StatsMultiColl
);
1494 dev
->stats
.collisions
+= ioread8(ioaddr
+ StatsOneColl
);
1495 dev
->stats
.tx_carrier_errors
+= ioread8(ioaddr
+ StatsCarrierError
);
1496 ioread8(ioaddr
+ StatsTxDefer
);
1497 for (i
= StatsTxDefer
; i
<= StatsMcastRx
; i
++)
1498 ioread8(ioaddr
+ i
);
1499 dev
->stats
.tx_bytes
+= ioread16(ioaddr
+ TxOctetsLow
);
1500 dev
->stats
.tx_bytes
+= ioread16(ioaddr
+ TxOctetsHigh
) << 16;
1501 dev
->stats
.rx_bytes
+= ioread16(ioaddr
+ RxOctetsLow
);
1502 dev
->stats
.rx_bytes
+= ioread16(ioaddr
+ RxOctetsHigh
) << 16;
1507 static void set_rx_mode(struct net_device
*dev
)
1509 struct netdev_private
*np
= netdev_priv(dev
);
1510 void __iomem
*ioaddr
= np
->base
;
1511 u16 mc_filter
[4]; /* Multicast hash filter */
1515 if (dev
->flags
& IFF_PROMISC
) { /* Set promiscuous. */
1516 memset(mc_filter
, 0xff, sizeof(mc_filter
));
1517 rx_mode
= AcceptBroadcast
| AcceptMulticast
| AcceptAll
| AcceptMyPhys
;
1518 } else if ((netdev_mc_count(dev
) > multicast_filter_limit
) ||
1519 (dev
->flags
& IFF_ALLMULTI
)) {
1520 /* Too many to match, or accept all multicasts. */
1521 memset(mc_filter
, 0xff, sizeof(mc_filter
));
1522 rx_mode
= AcceptBroadcast
| AcceptMulticast
| AcceptMyPhys
;
1523 } else if (!netdev_mc_empty(dev
)) {
1524 struct netdev_hw_addr
*ha
;
1528 memset (mc_filter
, 0, sizeof (mc_filter
));
1529 netdev_for_each_mc_addr(ha
, dev
) {
1530 crc
= ether_crc_le(ETH_ALEN
, ha
->addr
);
1531 for (index
=0, bit
=0; bit
< 6; bit
++, crc
<<= 1)
1532 if (crc
& 0x80000000) index
|= 1 << bit
;
1533 mc_filter
[index
/16] |= (1 << (index
% 16));
1535 rx_mode
= AcceptBroadcast
| AcceptMultiHash
| AcceptMyPhys
;
1537 iowrite8(AcceptBroadcast
| AcceptMyPhys
, ioaddr
+ RxMode
);
1540 if (np
->mii_if
.full_duplex
&& np
->flowctrl
)
1541 mc_filter
[3] |= 0x0200;
1543 for (i
= 0; i
< 4; i
++)
1544 iowrite16(mc_filter
[i
], ioaddr
+ MulticastFilter0
+ i
*2);
1545 iowrite8(rx_mode
, ioaddr
+ RxMode
);
1548 static int __set_mac_addr(struct net_device
*dev
)
1550 struct netdev_private
*np
= netdev_priv(dev
);
1553 addr16
= (dev
->dev_addr
[0] | (dev
->dev_addr
[1] << 8));
1554 iowrite16(addr16
, np
->base
+ StationAddr
);
1555 addr16
= (dev
->dev_addr
[2] | (dev
->dev_addr
[3] << 8));
1556 iowrite16(addr16
, np
->base
+ StationAddr
+2);
1557 addr16
= (dev
->dev_addr
[4] | (dev
->dev_addr
[5] << 8));
1558 iowrite16(addr16
, np
->base
+ StationAddr
+4);
1562 static int check_if_running(struct net_device
*dev
)
1564 if (!netif_running(dev
))
1569 static void get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
1571 struct netdev_private
*np
= netdev_priv(dev
);
1572 strcpy(info
->driver
, DRV_NAME
);
1573 strcpy(info
->version
, DRV_VERSION
);
1574 strcpy(info
->bus_info
, pci_name(np
->pci_dev
));
1577 static int get_settings(struct net_device
*dev
, struct ethtool_cmd
*ecmd
)
1579 struct netdev_private
*np
= netdev_priv(dev
);
1580 spin_lock_irq(&np
->lock
);
1581 mii_ethtool_gset(&np
->mii_if
, ecmd
);
1582 spin_unlock_irq(&np
->lock
);
1586 static int set_settings(struct net_device
*dev
, struct ethtool_cmd
*ecmd
)
1588 struct netdev_private
*np
= netdev_priv(dev
);
1590 spin_lock_irq(&np
->lock
);
1591 res
= mii_ethtool_sset(&np
->mii_if
, ecmd
);
1592 spin_unlock_irq(&np
->lock
);
1596 static int nway_reset(struct net_device
*dev
)
1598 struct netdev_private
*np
= netdev_priv(dev
);
1599 return mii_nway_restart(&np
->mii_if
);
1602 static u32
get_link(struct net_device
*dev
)
1604 struct netdev_private
*np
= netdev_priv(dev
);
1605 return mii_link_ok(&np
->mii_if
);
1608 static u32
get_msglevel(struct net_device
*dev
)
1610 struct netdev_private
*np
= netdev_priv(dev
);
1611 return np
->msg_enable
;
1614 static void set_msglevel(struct net_device
*dev
, u32 val
)
1616 struct netdev_private
*np
= netdev_priv(dev
);
1617 np
->msg_enable
= val
;
1620 static const struct ethtool_ops ethtool_ops
= {
1621 .begin
= check_if_running
,
1622 .get_drvinfo
= get_drvinfo
,
1623 .get_settings
= get_settings
,
1624 .set_settings
= set_settings
,
1625 .nway_reset
= nway_reset
,
1626 .get_link
= get_link
,
1627 .get_msglevel
= get_msglevel
,
1628 .set_msglevel
= set_msglevel
,
1631 static int netdev_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
1633 struct netdev_private
*np
= netdev_priv(dev
);
1636 if (!netif_running(dev
))
1639 spin_lock_irq(&np
->lock
);
1640 rc
= generic_mii_ioctl(&np
->mii_if
, if_mii(rq
), cmd
, NULL
);
1641 spin_unlock_irq(&np
->lock
);
1646 static int netdev_close(struct net_device
*dev
)
1648 struct netdev_private
*np
= netdev_priv(dev
);
1649 void __iomem
*ioaddr
= np
->base
;
1650 struct sk_buff
*skb
;
1653 /* Wait and kill tasklet */
1654 tasklet_kill(&np
->rx_tasklet
);
1655 tasklet_kill(&np
->tx_tasklet
);
1661 netif_stop_queue(dev
);
1663 if (netif_msg_ifdown(np
)) {
1664 printk(KERN_DEBUG
"%s: Shutting down ethercard, status was Tx %2.2x "
1665 "Rx %4.4x Int %2.2x.\n",
1666 dev
->name
, ioread8(ioaddr
+ TxStatus
),
1667 ioread32(ioaddr
+ RxStatus
), ioread16(ioaddr
+ IntrStatus
));
1668 printk(KERN_DEBUG
"%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1669 dev
->name
, np
->cur_tx
, np
->dirty_tx
, np
->cur_rx
, np
->dirty_rx
);
1672 /* Disable interrupts by clearing the interrupt mask. */
1673 iowrite16(0x0000, ioaddr
+ IntrEnable
);
1675 /* Disable Rx and Tx DMA for safely release resource */
1676 iowrite32(0x500, ioaddr
+ DMACtrl
);
1678 /* Stop the chip's Tx and Rx processes. */
1679 iowrite16(TxDisable
| RxDisable
| StatsDisable
, ioaddr
+ MACCtrl1
);
1681 for (i
= 2000; i
> 0; i
--) {
1682 if ((ioread32(ioaddr
+ DMACtrl
) & 0xc000) == 0)
1687 iowrite16(GlobalReset
| DMAReset
| FIFOReset
| NetworkReset
,
1688 ioaddr
+ASICCtrl
+ 2);
1690 for (i
= 2000; i
> 0; i
--) {
1691 if ((ioread16(ioaddr
+ ASICCtrl
+2) & ResetBusy
) == 0)
1697 if (netif_msg_hw(np
)) {
1698 printk(KERN_DEBUG
" Tx ring at %8.8x:\n",
1699 (int)(np
->tx_ring_dma
));
1700 for (i
= 0; i
< TX_RING_SIZE
; i
++)
1701 printk(KERN_DEBUG
" #%d desc. %4.4x %8.8x %8.8x.\n",
1702 i
, np
->tx_ring
[i
].status
, np
->tx_ring
[i
].frag
[0].addr
,
1703 np
->tx_ring
[i
].frag
[0].length
);
1704 printk(KERN_DEBUG
" Rx ring %8.8x:\n",
1705 (int)(np
->rx_ring_dma
));
1706 for (i
= 0; i
< /*RX_RING_SIZE*/4 ; i
++) {
1707 printk(KERN_DEBUG
" #%d desc. %4.4x %4.4x %8.8x\n",
1708 i
, np
->rx_ring
[i
].status
, np
->rx_ring
[i
].frag
[0].addr
,
1709 np
->rx_ring
[i
].frag
[0].length
);
1712 #endif /* __i386__ debugging only */
1714 free_irq(dev
->irq
, dev
);
1716 del_timer_sync(&np
->timer
);
1718 /* Free all the skbuffs in the Rx queue. */
1719 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
1720 np
->rx_ring
[i
].status
= 0;
1721 skb
= np
->rx_skbuff
[i
];
1723 pci_unmap_single(np
->pci_dev
,
1724 le32_to_cpu(np
->rx_ring
[i
].frag
[0].addr
),
1725 np
->rx_buf_sz
, PCI_DMA_FROMDEVICE
);
1727 np
->rx_skbuff
[i
] = NULL
;
1729 np
->rx_ring
[i
].frag
[0].addr
= cpu_to_le32(0xBADF00D0); /* poison */
1731 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
1732 np
->tx_ring
[i
].next_desc
= 0;
1733 skb
= np
->tx_skbuff
[i
];
1735 pci_unmap_single(np
->pci_dev
,
1736 le32_to_cpu(np
->tx_ring
[i
].frag
[0].addr
),
1737 skb
->len
, PCI_DMA_TODEVICE
);
1739 np
->tx_skbuff
[i
] = NULL
;
1746 static void __devexit
sundance_remove1 (struct pci_dev
*pdev
)
1748 struct net_device
*dev
= pci_get_drvdata(pdev
);
1751 struct netdev_private
*np
= netdev_priv(dev
);
1753 unregister_netdev(dev
);
1754 pci_free_consistent(pdev
, RX_TOTAL_SIZE
, np
->rx_ring
,
1756 pci_free_consistent(pdev
, TX_TOTAL_SIZE
, np
->tx_ring
,
1758 pci_iounmap(pdev
, np
->base
);
1759 pci_release_regions(pdev
);
1761 pci_set_drvdata(pdev
, NULL
);
1765 static struct pci_driver sundance_driver
= {
1767 .id_table
= sundance_pci_tbl
,
1768 .probe
= sundance_probe1
,
1769 .remove
= __devexit_p(sundance_remove1
),
1772 static int __init
sundance_init(void)
1774 /* when a module, this is printed whether or not devices are found in probe */
1778 return pci_register_driver(&sundance_driver
);
1781 static void __exit
sundance_exit(void)
1783 pci_unregister_driver(&sundance_driver
);
module_init(sundance_init);
module_exit(sundance_exit);