[PATCH] sundance: remove if (1) { ... } block in sundance_probe1
drivers/net/sundance.c
1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2/*
3 Written 1999-2000 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19
20
21 Version LK1.01a (jgarzik):
22 - Replace some MII-related magic numbers with constants
23
24 Version LK1.02 (D-Link):
25 - Add new board to PCI ID list
26 - Fix multicast bug
27
28 Version LK1.03 (D-Link):
29 - New Rx scheme, reduce Rx congestion
30 - Option to disable flow control
31
32 Version LK1.04 (D-Link):
33 - Tx timeout recovery
34 - More support for ethtool.
35
36 Version LK1.04a:
37 - Remove unused/constant members from struct pci_id_info
38 (which then allows removal of 'drv_flags' from private struct)
39 (jgarzik)
40 - If no phy is found, fail to load that board (jgarzik)
41 - Always start phy id scan at id 1 to avoid problems (Donald Becker)
 42 - Autodetect where mii_preamble_required is needed,
43 default to not needed. (Donald Becker)
44
45 Version LK1.04b:
46 - Remove mii_preamble_required module parameter (Donald Becker)
47 - Add per-interface mii_preamble_required (setting is autodetected)
48 (Donald Becker)
49 - Remove unnecessary cast from void pointer (jgarzik)
50 - Re-align comments in private struct (jgarzik)
51
52 Version LK1.04c (jgarzik):
53 - Support bitmapped message levels (NETIF_MSG_xxx), and the
54 two ethtool ioctls that get/set them
55 - Don't hand-code MII ethtool support, use standard API/lib
56
57 Version LK1.04d:
58 - Merge from Donald Becker's sundance.c: (Jason Lunz)
59 * proper support for variably-sized MTUs
60 * default to PIO, to fix chip bugs
61 - Add missing unregister_netdev (Jason Lunz)
62 - Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
63 - Better rx buf size calculation (Donald Becker)
64
65 Version LK1.05 (D-Link):
66 - Fix DFE-580TX packet drop issue (for DL10050C)
67 - Fix reset_tx logic
68
69 Version LK1.06 (D-Link):
70 - Fix crash while unloading driver
71
 72 Version LK1.06b (D-Link):
73 - New tx scheme, adaptive tx_coalesce
74
75 Version LK1.07 (D-Link):
76 - Fix tx bugs in big-endian machines
77 - Remove unused max_interrupt_work module parameter, the new
78 NAPI-like rx scheme doesn't need it.
 79 - Remove redundant get_stats() call in intr_handler(); those
 80 I/O accesses could affect performance on ARM-based systems
81 - Add Linux software VLAN support
82
83 Version LK1.08 (D-Link):
84 - Fix bug of custom mac address
 85 (StationAddr register only accepts word writes)
86
87 Version LK1.09 (D-Link):
88 - Fix the flowctrl bug.
89 - Set Pause bit in MII ANAR if flow control enabled.
90
91 Version LK1.09a (ICPlus):
 92 - Add a delay when reading the contents of the EEPROM
93
94*/
95
96#define DRV_NAME "sundance"
97#define DRV_VERSION "1.01+LK1.09a"
98#define DRV_RELDATE "10-Jul-2003"
99
100
101/* The user-configurable values.
102 These may be modified when a driver module is loaded.*/
103static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
104/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
105 Typical is a 64 element hash table based on the Ethernet CRC. */
106static int multicast_filter_limit = 32;
107
108/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
109 Setting to > 1518 effectively disables this feature.
110 This chip can receive into offset buffers, so the Alpha does not
111 need a copy-align. */
112static int rx_copybreak;
113static int flowctrl=1;
114
115/* media[] specifies the media type the NIC operates at.
116 autosense Autosensing active media.
117 10mbps_hd 10Mbps half duplex.
118 10mbps_fd 10Mbps full duplex.
119 100mbps_hd 100Mbps half duplex.
120 100mbps_fd 100Mbps full duplex.
121 0 Autosensing active media.
122 1 10Mbps half duplex.
123 2 10Mbps full duplex.
124 3 100Mbps half duplex.
125 4 100Mbps full duplex.
126*/
127#define MAX_UNITS 8
128static char *media[MAX_UNITS];
129
130
131/* Operational parameters that are set at compile time. */
132
133/* Keep the ring sizes a power of two for compile efficiency.
134 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
135 Making the Tx ring too large decreases the effectiveness of channel
136 bonding and packet priority, and more than 128 requires modifying the
137 Tx error recovery.
138 Large receive rings merely waste memory. */
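/* Illustrative note (not from the original driver): because the ring sizes
   are powers of two and the ring indices are unsigned, an expression such as
   'np->cur_tx % TX_RING_SIZE' should compile down to 'np->cur_tx & 31'
   when TX_RING_SIZE is 32, i.e. a single AND instead of a division. */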
139#define TX_RING_SIZE 32
140#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
141#define RX_RING_SIZE 64
142#define RX_BUDGET 32
143#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
144#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
145
146/* Operational parameters that usually are not changed. */
147/* Time in jiffies before concluding the transmitter is hung. */
148#define TX_TIMEOUT (4*HZ)
149#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
150
151/* Include files, designed to support most kernel versions 2.0.0 and later. */
152#include <linux/module.h>
153#include <linux/kernel.h>
154#include <linux/string.h>
155#include <linux/timer.h>
156#include <linux/errno.h>
157#include <linux/ioport.h>
158#include <linux/slab.h>
159#include <linux/interrupt.h>
160#include <linux/pci.h>
161#include <linux/netdevice.h>
162#include <linux/etherdevice.h>
163#include <linux/skbuff.h>
164#include <linux/init.h>
165#include <linux/bitops.h>
166#include <asm/uaccess.h>
167#include <asm/processor.h> /* Processor type for cache alignment. */
168#include <asm/io.h>
169#include <linux/delay.h>
170#include <linux/spinlock.h>
171#ifndef _COMPAT_WITH_OLD_KERNEL
172#include <linux/crc32.h>
173#include <linux/ethtool.h>
174#include <linux/mii.h>
175#else
176#include "crc32.h"
177#include "ethtool.h"
178#include "mii.h"
179#include "compat.h"
180#endif
181
182/* These identify the driver base version and may not be removed. */
183static char version[] __devinitdata =
184KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
185KERN_INFO " http://www.scyld.com/network/sundance.html\n";
186
187MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
188MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
189MODULE_LICENSE("GPL");
190
191module_param(debug, int, 0);
192module_param(rx_copybreak, int, 0);
193module_param_array(media, charp, NULL, 0);
194module_param(flowctrl, int, 0);
195MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
196MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
197MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
198
199/*
200 Theory of Operation
201
202I. Board Compatibility
203
204This driver is designed for the Sundance Technologies "Alta" ST201 chip.
205
206II. Board-specific settings
207
208III. Driver operation
209
210IIIa. Ring buffers
211
212This driver uses two statically allocated fixed-size descriptor lists
213formed into rings by a branch from the final descriptor to the beginning of
214the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
215Some chips explicitly use only 2^N sized rings, while others use a
216'next descriptor' pointer that the driver forms into rings.
217
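As a rough sketch (not part of the original text), init_ring() below chains
the Rx descriptors into such a ring by pointing each entry's 'next_desc' at
the bus address of the following entry, with the last one wrapping back to
entry 0:

	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i + 1) % RX_RING_SIZE) * sizeof(*np->rx_ring));
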
218IIIb/c. Transmit/Receive Structure
219
220This driver uses a zero-copy receive and transmit scheme.
221The driver allocates full frame size skbuffs for the Rx ring buffers at
222open() time and passes the skb->data field to the chip as receive data
223buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
224a fresh skbuff is allocated and the frame is copied to the new skbuff.
225When the incoming frame is larger, the skbuff is passed directly up the
226protocol stack. Buffers consumed this way are replaced by newly allocated
227skbuffs in a later phase of receives.
228
229The RX_COPYBREAK value is chosen to trade-off the memory wasted by
230using a full-sized skbuff for small frames vs. the copying costs of larger
231frames. New boards are typically used in generously configured machines
232and the underfilled buffers have negligible impact compared to the benefit of
233a single allocation size, so the default value of zero results in never
234copying packets. When copying is done, the cost is usually mitigated by using
235a combined copy/checksum routine. Copying also preloads the cache, which is
236most useful with small frames.
237
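In outline (a simplified sketch of rx_poll() below, with the DMA sync calls
and error handling omitted), the copybreak test looks like:

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);
		eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
	} else {
		skb = np->rx_skbuff[entry];
		np->rx_skbuff[entry] = NULL;
	}
	skb_put(skb, pkt_len);
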
238A subtle aspect of the operation is that the IP header at offset 14 in an
239ethernet frame isn't longword aligned for further processing.
240Unaligned buffers are permitted by the Sundance hardware, so
241frames are received into the skbuff at an offset of "+2", 16-byte aligning
242the IP header.
243
244IIId. Synchronization
245
246The driver runs as two independent, single-threaded flows of control. One
247is the send-packet routine, which enforces single-threaded use by the
248dev->tbusy flag. The other thread is the interrupt handler, which is single
249threaded by the hardware and interrupt handling software.
250
251The send packet thread has partial control over the Tx ring and 'dev->tbusy'
252flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
253queue slot is empty, it clears the tbusy flag when finished otherwise it sets
254the 'lp->tx_full' flag.
255
256The interrupt handler has exclusive control over the Rx ring and records stats
257from the Tx ring. After reaping the stats, it marks the Tx queue entry as
258empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
259clears both the tx_full and tbusy flags.
260
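In the current code the tbusy/tx_full handshake described above is expressed
through the netif queue API; roughly (an illustrative sketch, not verbatim):

	In start_tx():
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
			netif_stop_queue(dev);

	In intr_handler():
		if (netif_queue_stopped(dev) &&
		    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
			netif_wake_queue(dev);
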
261IV. Notes
262
263IVb. References
264
265The Sundance ST201 datasheet, preliminary version.
266http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
267http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
268
269IVc. Errata
270
271*/
272
273/* Work-around for Kendin chip bugs. */
274#ifndef CONFIG_SUNDANCE_MMIO
275#define USE_IO_OPS 1
276#endif
277
278static struct pci_device_id sundance_pci_tbl[] = {
279 {0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
280 {0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
281 {0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
282 {0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
283 {0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
284 {0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
285 {0,}
286};
287MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
288
289enum {
290 netdev_io_size = 128
291};
292
293struct pci_id_info {
294 const char *name;
295};
296static struct pci_id_info pci_id_tbl[] = {
297 {"D-Link DFE-550TX FAST Ethernet Adapter"},
298 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
299 {"D-Link DFE-580TX 4 port Server Adapter"},
300 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
301 {"D-Link DL10050-based FAST Ethernet Adapter"},
302 {"Sundance Technology Alta"},
303 {NULL,}, /* 0 terminated list. */
304};
305
306/* This driver was written to use PCI memory space, however x86-oriented
307 hardware often uses I/O space accesses. */
308
309/* Offsets to the device registers.
310 Unlike software-only systems, device drivers interact with complex hardware.
311 It's not useful to define symbolic names for every register bit in the
312 device. The name can only partially document the semantics and make
313 the driver longer and more difficult to read.
314 In general, only the important configuration values or bits changed
315 multiple times should be defined symbolically.
316*/
317enum alta_offsets {
318 DMACtrl = 0x00,
319 TxListPtr = 0x04,
320 TxDMABurstThresh = 0x08,
321 TxDMAUrgentThresh = 0x09,
322 TxDMAPollPeriod = 0x0a,
323 RxDMAStatus = 0x0c,
324 RxListPtr = 0x10,
325 DebugCtrl0 = 0x1a,
326 DebugCtrl1 = 0x1c,
327 RxDMABurstThresh = 0x14,
328 RxDMAUrgentThresh = 0x15,
329 RxDMAPollPeriod = 0x16,
330 LEDCtrl = 0x1a,
331 ASICCtrl = 0x30,
332 EEData = 0x34,
333 EECtrl = 0x36,
334 TxStartThresh = 0x3c,
335 RxEarlyThresh = 0x3e,
336 FlashAddr = 0x40,
337 FlashData = 0x44,
338 TxStatus = 0x46,
339 TxFrameId = 0x47,
340 DownCounter = 0x18,
341 IntrClear = 0x4a,
342 IntrEnable = 0x4c,
343 IntrStatus = 0x4e,
344 MACCtrl0 = 0x50,
345 MACCtrl1 = 0x52,
346 StationAddr = 0x54,
347 MaxFrameSize = 0x5A,
348 RxMode = 0x5c,
349 MIICtrl = 0x5e,
350 MulticastFilter0 = 0x60,
351 MulticastFilter1 = 0x64,
352 RxOctetsLow = 0x68,
353 RxOctetsHigh = 0x6a,
354 TxOctetsLow = 0x6c,
355 TxOctetsHigh = 0x6e,
356 TxFramesOK = 0x70,
357 RxFramesOK = 0x72,
358 StatsCarrierError = 0x74,
359 StatsLateColl = 0x75,
360 StatsMultiColl = 0x76,
361 StatsOneColl = 0x77,
362 StatsTxDefer = 0x78,
363 RxMissed = 0x79,
364 StatsTxXSDefer = 0x7a,
365 StatsTxAbort = 0x7b,
366 StatsBcastTx = 0x7c,
367 StatsBcastRx = 0x7d,
368 StatsMcastTx = 0x7e,
369 StatsMcastRx = 0x7f,
370 /* Aliased and bogus values! */
371 RxStatus = 0x0c,
372};
373enum ASICCtrl_HiWord_bit {
374 GlobalReset = 0x0001,
375 RxReset = 0x0002,
376 TxReset = 0x0004,
377 DMAReset = 0x0008,
378 FIFOReset = 0x0010,
379 NetworkReset = 0x0020,
380 HostReset = 0x0040,
381 ResetBusy = 0x0400,
382};
383
384/* Bits in the interrupt status/mask registers. */
385enum intr_status_bits {
386 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
387 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
388 IntrDrvRqst=0x0040,
389 StatsMax=0x0080, LinkChange=0x0100,
390 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
391};
392
393/* Bits in the RxMode register. */
394enum rx_mode_bits {
395 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
396 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
397};
398/* Bits in MACCtrl. */
399enum mac_ctrl0_bits {
400 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
401 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
402};
403enum mac_ctrl1_bits {
404 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
405 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
406 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
407};
408
409/* The Rx and Tx buffer descriptors. */
410/* Note that using only 32 bit fields simplifies conversion to big-endian
411 architectures. */
412struct netdev_desc {
413 u32 next_desc;
414 u32 status;
415 struct desc_frag { u32 addr, length; } frag[1];
416};
417
418/* Bits in netdev_desc.status */
419enum desc_status_bits {
420 DescOwn=0x8000,
421 DescEndPacket=0x4000,
422 DescEndRing=0x2000,
423 LastFrag=0x80000000,
424 DescIntrOnTx=0x8000,
425 DescIntrOnDMADone=0x80000000,
426 DisableAlign = 0x00000001,
427};
428
429#define PRIV_ALIGN 15 /* Required alignment mask */
430/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
431 within the structure. */
432#define MII_CNT 4
433struct netdev_private {
434 /* Descriptor rings first for alignment. */
435 struct netdev_desc *rx_ring;
436 struct netdev_desc *tx_ring;
437 struct sk_buff* rx_skbuff[RX_RING_SIZE];
438 struct sk_buff* tx_skbuff[TX_RING_SIZE];
439 dma_addr_t tx_ring_dma;
440 dma_addr_t rx_ring_dma;
441 struct net_device_stats stats;
442 struct timer_list timer; /* Media monitoring timer. */
443 /* Frequently used values: keep some adjacent for cache effect. */
444 spinlock_t lock;
445 spinlock_t rx_lock; /* Group with Tx control cache line. */
446 int msg_enable;
447 int chip_id;
448 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
449 unsigned int rx_buf_sz; /* Based on MTU+slack. */
450 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
451 unsigned int cur_tx, dirty_tx;
 452 /* These values keep track of the transceiver/media in use. */
453 unsigned int flowctrl:1;
454 unsigned int default_port:4; /* Last dev->if_port value. */
455 unsigned int an_enable:1;
456 unsigned int speed;
457 struct tasklet_struct rx_tasklet;
458 struct tasklet_struct tx_tasklet;
459 int budget;
460 int cur_task;
461 /* Multicast and receive mode. */
462 spinlock_t mcastlock; /* SMP lock multicast updates. */
463 u16 mcast_filter[4];
464 /* MII transceiver section. */
465 struct mii_if_info mii_if;
466 int mii_preamble_required;
467 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
468 struct pci_dev *pci_dev;
469 void __iomem *base;
470 unsigned char pci_rev_id;
471};
472
473/* The station address location in the EEPROM. */
474#define EEPROM_SA_OFFSET 0x10
475#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
476 IntrDrvRqst | IntrTxDone | StatsMax | \
477 LinkChange)
478
479static int change_mtu(struct net_device *dev, int new_mtu);
480static int eeprom_read(void __iomem *ioaddr, int location);
481static int mdio_read(struct net_device *dev, int phy_id, int location);
482static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
483static int netdev_open(struct net_device *dev);
484static void check_duplex(struct net_device *dev);
485static void netdev_timer(unsigned long data);
486static void tx_timeout(struct net_device *dev);
487static void init_ring(struct net_device *dev);
488static int start_tx(struct sk_buff *skb, struct net_device *dev);
489static int reset_tx (struct net_device *dev);
490static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
491static void rx_poll(unsigned long data);
492static void tx_poll(unsigned long data);
493static void refill_rx (struct net_device *dev);
 494 static void netdev_error(struct net_device *dev, int intr_status);
496static void set_rx_mode(struct net_device *dev);
497static int __set_mac_addr(struct net_device *dev);
498static struct net_device_stats *get_stats(struct net_device *dev);
499static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
500static int netdev_close(struct net_device *dev);
501static struct ethtool_ops ethtool_ops;
502
503static int __devinit sundance_probe1 (struct pci_dev *pdev,
504 const struct pci_device_id *ent)
505{
506 struct net_device *dev;
507 struct netdev_private *np;
508 static int card_idx;
509 int chip_idx = ent->driver_data;
510 int irq;
511 int i;
512 void __iomem *ioaddr;
513 u16 mii_ctl;
514 void *ring_space;
515 dma_addr_t ring_dma;
516#ifdef USE_IO_OPS
517 int bar = 0;
518#else
519 int bar = 1;
520#endif
 521 int phy, phy_idx = 0;
522
523
524/* when built into the kernel, we only print version if device is found */
525#ifndef MODULE
526 static int printed_version;
527 if (!printed_version++)
528 printk(version);
529#endif
530
531 if (pci_enable_device(pdev))
532 return -EIO;
533 pci_set_master(pdev);
534
535 irq = pdev->irq;
536
537 dev = alloc_etherdev(sizeof(*np));
538 if (!dev)
539 return -ENOMEM;
540 SET_MODULE_OWNER(dev);
541 SET_NETDEV_DEV(dev, &pdev->dev);
542
543 if (pci_request_regions(pdev, DRV_NAME))
544 goto err_out_netdev;
545
546 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
547 if (!ioaddr)
548 goto err_out_res;
549
550 for (i = 0; i < 3; i++)
551 ((u16 *)dev->dev_addr)[i] =
552 le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 553 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
554
555 dev->base_addr = (unsigned long)ioaddr;
556 dev->irq = irq;
557
558 np = netdev_priv(dev);
559 np->base = ioaddr;
560 np->pci_dev = pdev;
561 np->chip_id = chip_idx;
562 np->msg_enable = (1 << debug) - 1;
563 spin_lock_init(&np->lock);
564 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
565 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
566
567 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
568 if (!ring_space)
569 goto err_out_cleardev;
570 np->tx_ring = (struct netdev_desc *)ring_space;
571 np->tx_ring_dma = ring_dma;
572
573 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
574 if (!ring_space)
575 goto err_out_unmap_tx;
576 np->rx_ring = (struct netdev_desc *)ring_space;
577 np->rx_ring_dma = ring_dma;
578
579 np->mii_if.dev = dev;
580 np->mii_if.mdio_read = mdio_read;
581 np->mii_if.mdio_write = mdio_write;
582 np->mii_if.phy_id_mask = 0x1f;
583 np->mii_if.reg_num_mask = 0x1f;
584
585 /* The chip-specific entries in the device structure. */
586 dev->open = &netdev_open;
587 dev->hard_start_xmit = &start_tx;
588 dev->stop = &netdev_close;
589 dev->get_stats = &get_stats;
590 dev->set_multicast_list = &set_rx_mode;
591 dev->do_ioctl = &netdev_ioctl;
592 SET_ETHTOOL_OPS(dev, &ethtool_ops);
593 dev->tx_timeout = &tx_timeout;
594 dev->watchdog_timeo = TX_TIMEOUT;
595 dev->change_mtu = &change_mtu;
596 pci_set_drvdata(pdev, dev);
597
598 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
599
600 i = register_netdev(dev);
601 if (i)
602 goto err_out_unmap_rx;
603
604 printk(KERN_INFO "%s: %s at %p, ",
605 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
606 for (i = 0; i < 5; i++)
607 printk("%2.2x:", dev->dev_addr[i]);
608 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
609
610 np->phys[0] = 1; /* Default setting */
611 np->mii_preamble_required++;
612 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
613 int mii_status = mdio_read(dev, phy, MII_BMSR);
614 if (mii_status != 0xffff && mii_status != 0x0000) {
615 np->phys[phy_idx++] = phy;
616 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
617 if ((mii_status & 0x0040) == 0)
618 np->mii_preamble_required++;
619 printk(KERN_INFO "%s: MII PHY found at address %d, status "
620 "0x%4.4x advertising %4.4x.\n",
621 dev->name, phy, mii_status, np->mii_if.advertising);
 622 }
623 }
624 np->mii_preamble_required--;
 625
626 if (phy_idx == 0) {
627 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
628 dev->name, ioread32(ioaddr + ASICCtrl));
629 goto err_out_unregister;
630 }
631
632 np->mii_if.phy_id = np->phys[0];
633
634 /* Parse override configuration */
635 np->an_enable = 1;
636 if (card_idx < MAX_UNITS) {
637 if (media[card_idx] != NULL) {
638 np->an_enable = 0;
639 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
640 strcmp (media[card_idx], "4") == 0) {
641 np->speed = 100;
642 np->mii_if.full_duplex = 1;
643 } else if (strcmp (media[card_idx], "100mbps_hd") == 0
644 || strcmp (media[card_idx], "3") == 0) {
645 np->speed = 100;
646 np->mii_if.full_duplex = 0;
647 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
648 strcmp (media[card_idx], "2") == 0) {
649 np->speed = 10;
650 np->mii_if.full_duplex = 1;
651 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
652 strcmp (media[card_idx], "1") == 0) {
653 np->speed = 10;
654 np->mii_if.full_duplex = 0;
655 } else {
656 np->an_enable = 1;
657 }
658 }
659 if (flowctrl == 1)
660 np->flowctrl = 1;
661 }
662
663 /* Fibre PHY? */
664 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
665 /* Default 100Mbps Full */
666 if (np->an_enable) {
667 np->speed = 100;
668 np->mii_if.full_duplex = 1;
669 np->an_enable = 0;
670 }
671 }
672 /* Reset PHY */
673 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
674 mdelay (300);
675 /* If flow control enabled, we need to advertise it.*/
676 if (np->flowctrl)
677 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
678 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
679 /* Force media type */
680 if (!np->an_enable) {
681 mii_ctl = 0;
682 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
683 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
684 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
685 printk (KERN_INFO "Override speed=%d, %s duplex\n",
686 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
687
688 }
689
690 /* Perhaps move the reset here? */
691 /* Reset the chip to erase previous misconfiguration. */
692 if (netif_msg_hw(np))
693 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
694 iowrite16(0x007f, ioaddr + ASICCtrl + 2);
695 if (netif_msg_hw(np))
696 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
697
698 card_idx++;
699 return 0;
700
701err_out_unregister:
702 unregister_netdev(dev);
703err_out_unmap_rx:
704 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
705err_out_unmap_tx:
706 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
707err_out_cleardev:
708 pci_set_drvdata(pdev, NULL);
709 pci_iounmap(pdev, ioaddr);
710err_out_res:
711 pci_release_regions(pdev);
712err_out_netdev:
713 free_netdev (dev);
714 return -ENODEV;
715}
716
717static int change_mtu(struct net_device *dev, int new_mtu)
718{
719 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
720 return -EINVAL;
721 if (netif_running(dev))
722 return -EBUSY;
723 dev->mtu = new_mtu;
724 return 0;
725}
726
727#define eeprom_delay(ee_addr) ioread32(ee_addr)
728/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
729static int __devinit eeprom_read(void __iomem *ioaddr, int location)
730{
731 int boguscnt = 10000; /* Typical 1900 ticks. */
732 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
733 do {
734 eeprom_delay(ioaddr + EECtrl);
735 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
736 return ioread16(ioaddr + EEData);
737 }
738 } while (--boguscnt > 0);
739 return 0;
740}
741
742/* MII transceiver control section.
743 Read and write the MII registers using software-generated serial
744 MDIO protocol. See the MII specifications or DP83840A data sheet
745 for details.
746
 747 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
 748 met by back-to-back 33 MHz PCI cycles. */
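/* Added note (not from the original driver): the command words built in
   mdio_read()/mdio_write() below follow the standard MDIO frame layout.
   A read shifts out 16 bits -- two extra '1' fill bits, the '01' start
   pattern, the '10' read opcode, the 5-bit PHY address and the 5-bit
   register number -- then clocks in the turnaround plus 16 data bits.
   A write shifts out a full 32-bit word: start, the '01' write opcode,
   PHY address, register number, a '10' turnaround and the 16 data bits. */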
749#define mdio_delay() ioread8(mdio_addr)
750
751enum mii_reg_bits {
752 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
753};
754#define MDIO_EnbIn (0)
755#define MDIO_WRITE0 (MDIO_EnbOutput)
756#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
757
758/* Generate the preamble required for initial synchronization and
759 a few older transceivers. */
760static void mdio_sync(void __iomem *mdio_addr)
761{
762 int bits = 32;
763
764 /* Establish sync by sending at least 32 logic ones. */
765 while (--bits >= 0) {
766 iowrite8(MDIO_WRITE1, mdio_addr);
767 mdio_delay();
768 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
769 mdio_delay();
770 }
771}
772
773static int mdio_read(struct net_device *dev, int phy_id, int location)
774{
775 struct netdev_private *np = netdev_priv(dev);
776 void __iomem *mdio_addr = np->base + MIICtrl;
777 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
778 int i, retval = 0;
779
780 if (np->mii_preamble_required)
781 mdio_sync(mdio_addr);
782
783 /* Shift the read command bits out. */
784 for (i = 15; i >= 0; i--) {
785 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
786
787 iowrite8(dataval, mdio_addr);
788 mdio_delay();
789 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
790 mdio_delay();
791 }
792 /* Read the two transition, 16 data, and wire-idle bits. */
793 for (i = 19; i > 0; i--) {
794 iowrite8(MDIO_EnbIn, mdio_addr);
795 mdio_delay();
796 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
797 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
798 mdio_delay();
799 }
800 return (retval>>1) & 0xffff;
801}
802
803static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
804{
805 struct netdev_private *np = netdev_priv(dev);
806 void __iomem *mdio_addr = np->base + MIICtrl;
807 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
808 int i;
809
810 if (np->mii_preamble_required)
811 mdio_sync(mdio_addr);
812
813 /* Shift the command bits out. */
814 for (i = 31; i >= 0; i--) {
815 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
816
817 iowrite8(dataval, mdio_addr);
818 mdio_delay();
819 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
820 mdio_delay();
821 }
822 /* Clear out extra bits. */
823 for (i = 2; i > 0; i--) {
824 iowrite8(MDIO_EnbIn, mdio_addr);
825 mdio_delay();
826 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
827 mdio_delay();
828 }
829 return;
830}
831
832static int netdev_open(struct net_device *dev)
833{
834 struct netdev_private *np = netdev_priv(dev);
835 void __iomem *ioaddr = np->base;
836 int i;
837
838 /* Do we need to reset the chip??? */
839
840 i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
841 if (i)
842 return i;
843
844 if (netif_msg_ifup(np))
845 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
846 dev->name, dev->irq);
847 init_ring(dev);
848
849 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
850 /* The Tx list pointer is written as packets are queued. */
851
852 /* Initialize other registers. */
853 __set_mac_addr(dev);
854#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
855 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
856#else
857 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
858#endif
859 if (dev->mtu > 2047)
860 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
861
862 /* Configure the PCI bus bursts and FIFO thresholds. */
863
864 if (dev->if_port == 0)
865 dev->if_port = np->default_port;
866
867 spin_lock_init(&np->mcastlock);
868
869 set_rx_mode(dev);
870 iowrite16(0, ioaddr + IntrEnable);
871 iowrite16(0, ioaddr + DownCounter);
872 /* Set the chip to poll every N*320nsec. */
873 iowrite8(100, ioaddr + RxDMAPollPeriod);
874 iowrite8(127, ioaddr + TxDMAPollPeriod);
875 /* Fix DFE-580TX packet drop issue */
876 if (np->pci_rev_id >= 0x14)
877 iowrite8(0x01, ioaddr + DebugCtrl1);
878 netif_start_queue(dev);
879
880 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
881
882 if (netif_msg_ifup(np))
883 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
884 "MAC Control %x, %4.4x %4.4x.\n",
885 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
886 ioread32(ioaddr + MACCtrl0),
887 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
888
889 /* Set the timer to check for link beat. */
890 init_timer(&np->timer);
891 np->timer.expires = jiffies + 3*HZ;
892 np->timer.data = (unsigned long)dev;
893 np->timer.function = &netdev_timer; /* timer handler */
894 add_timer(&np->timer);
895
896 /* Enable interrupts by setting the interrupt mask. */
897 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
898
899 return 0;
900}
901
902static void check_duplex(struct net_device *dev)
903{
904 struct netdev_private *np = netdev_priv(dev);
905 void __iomem *ioaddr = np->base;
906 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
907 int negotiated = mii_lpa & np->mii_if.advertising;
908 int duplex;
909
910 /* Force media */
911 if (!np->an_enable || mii_lpa == 0xffff) {
912 if (np->mii_if.full_duplex)
913 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
914 ioaddr + MACCtrl0);
915 return;
916 }
917
918 /* Autonegotiation */
919 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
920 if (np->mii_if.full_duplex != duplex) {
921 np->mii_if.full_duplex = duplex;
922 if (netif_msg_link(np))
923 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
924 "negotiated capability %4.4x.\n", dev->name,
925 duplex ? "full" : "half", np->phys[0], negotiated);
 926 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
927 }
928}
929
930static void netdev_timer(unsigned long data)
931{
932 struct net_device *dev = (struct net_device *)data;
933 struct netdev_private *np = netdev_priv(dev);
934 void __iomem *ioaddr = np->base;
935 int next_tick = 10*HZ;
936
937 if (netif_msg_timer(np)) {
938 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
939 "Tx %x Rx %x.\n",
940 dev->name, ioread16(ioaddr + IntrEnable),
941 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
942 }
943 check_duplex(dev);
944 np->timer.expires = jiffies + next_tick;
945 add_timer(&np->timer);
946}
947
948static void tx_timeout(struct net_device *dev)
949{
950 struct netdev_private *np = netdev_priv(dev);
951 void __iomem *ioaddr = np->base;
952 unsigned long flag;
953
954 netif_stop_queue(dev);
955 tasklet_disable(&np->tx_tasklet);
956 iowrite16(0, ioaddr + IntrEnable);
957 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
958 "TxFrameId %2.2x,"
959 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
960 ioread8(ioaddr + TxFrameId));
961
962 {
963 int i;
964 for (i=0; i<TX_RING_SIZE; i++) {
965 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
966 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
967 le32_to_cpu(np->tx_ring[i].next_desc),
968 le32_to_cpu(np->tx_ring[i].status),
969 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
970 le32_to_cpu(np->tx_ring[i].frag[0].addr),
971 le32_to_cpu(np->tx_ring[i].frag[0].length));
972 }
973 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
974 ioread32(np->base + TxListPtr),
975 netif_queue_stopped(dev));
976 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
977 np->cur_tx, np->cur_tx % TX_RING_SIZE,
978 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
979 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
980 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
981 }
982 spin_lock_irqsave(&np->lock, flag);
983
 984 /* Stop and restart the chip's Tx processes. */
985 reset_tx(dev);
986 spin_unlock_irqrestore(&np->lock, flag);
987
988 dev->if_port = 0;
989
990 dev->trans_start = jiffies;
991 np->stats.tx_errors++;
992 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
993 netif_wake_queue(dev);
994 }
995 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
996 tasklet_enable(&np->tx_tasklet);
997}
998
999
1000/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1001static void init_ring(struct net_device *dev)
1002{
1003 struct netdev_private *np = netdev_priv(dev);
1004 int i;
1005
1006 np->cur_rx = np->cur_tx = 0;
1007 np->dirty_rx = np->dirty_tx = 0;
1008 np->cur_task = 0;
1009
1010 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1011
1012 /* Initialize all Rx descriptors. */
1013 for (i = 0; i < RX_RING_SIZE; i++) {
1014 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1015 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1016 np->rx_ring[i].status = 0;
1017 np->rx_ring[i].frag[0].length = 0;
1018 np->rx_skbuff[i] = NULL;
1019 }
1020
1021 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1022 for (i = 0; i < RX_RING_SIZE; i++) {
1023 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1024 np->rx_skbuff[i] = skb;
1025 if (skb == NULL)
1026 break;
1027 skb->dev = dev; /* Mark as being used by this device. */
1028 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1029 np->rx_ring[i].frag[0].addr = cpu_to_le32(
 1030 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
1031 PCI_DMA_FROMDEVICE));
1032 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1033 }
1034 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1035
1036 for (i = 0; i < TX_RING_SIZE; i++) {
1037 np->tx_skbuff[i] = NULL;
1038 np->tx_ring[i].status = 0;
1039 }
1040 return;
1041}
1042
1043static void tx_poll (unsigned long data)
1044{
1045 struct net_device *dev = (struct net_device *)data;
1046 struct netdev_private *np = netdev_priv(dev);
1047 unsigned head = np->cur_task % TX_RING_SIZE;
1048 struct netdev_desc *txdesc =
1049 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1050
1051 /* Chain the next pointer */
1052 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1053 int entry = np->cur_task % TX_RING_SIZE;
1054 txdesc = &np->tx_ring[entry];
1055 if (np->last_tx) {
1056 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1057 entry*sizeof(struct netdev_desc));
1058 }
1059 np->last_tx = txdesc;
1060 }
1061 /* Indicate the latest descriptor of tx ring */
1062 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1063
1064 if (ioread32 (np->base + TxListPtr) == 0)
1065 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1066 np->base + TxListPtr);
1067 return;
1068}
1069
1070static int
1071start_tx (struct sk_buff *skb, struct net_device *dev)
1072{
1073 struct netdev_private *np = netdev_priv(dev);
1074 struct netdev_desc *txdesc;
1075 unsigned entry;
1076
1077 /* Calculate the next Tx descriptor entry. */
1078 entry = np->cur_tx % TX_RING_SIZE;
1079 np->tx_skbuff[entry] = skb;
1080 txdesc = &np->tx_ring[entry];
1081
1082 txdesc->next_desc = 0;
1083 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1084 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1085 skb->len,
1086 PCI_DMA_TODEVICE));
1087 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1088
1089 /* Increment cur_tx before tasklet_schedule() */
1090 np->cur_tx++;
1091 mb();
1092 /* Schedule a tx_poll() task */
1093 tasklet_schedule(&np->tx_tasklet);
1094
1095 /* On some architectures: explicitly flush cache lines here. */
1096 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1097 && !netif_queue_stopped(dev)) {
1098 /* do nothing */
1099 } else {
1100 netif_stop_queue (dev);
1101 }
1102 dev->trans_start = jiffies;
1103 if (netif_msg_tx_queued(np)) {
1104 printk (KERN_DEBUG
1105 "%s: Transmit frame #%d queued in slot %d.\n",
1106 dev->name, np->cur_tx, entry);
1107 }
1108 return 0;
1109}
1110
1111/* Reset hardware tx and free all of tx buffers */
1112static int
1113reset_tx (struct net_device *dev)
1114{
1115 struct netdev_private *np = netdev_priv(dev);
1116 void __iomem *ioaddr = np->base;
1117 struct sk_buff *skb;
1118 int i;
1119 int irq = in_interrupt();
1120
1121 /* Reset tx logic, TxListPtr will be cleaned */
1122 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1123 iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
1124 ioaddr + ASICCtrl + 2);
1125 for (i=50; i > 0; i--) {
1126 if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
1127 break;
1128 mdelay(1);
1129 }
1130 /* free all tx skbuff */
1131 for (i = 0; i < TX_RING_SIZE; i++) {
1132 skb = np->tx_skbuff[i];
1133 if (skb) {
1134 pci_unmap_single(np->pci_dev,
1135 np->tx_ring[i].frag[0].addr, skb->len,
1136 PCI_DMA_TODEVICE);
1137 if (irq)
1138 dev_kfree_skb_irq (skb);
1139 else
1140 dev_kfree_skb (skb);
1141 np->tx_skbuff[i] = NULL;
1142 np->stats.tx_dropped++;
1143 }
1144 }
1145 np->cur_tx = np->dirty_tx = 0;
1146 np->cur_task = 0;
1147 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1148 return 0;
1149}
1150
1151/* The interrupt handler cleans up after the Tx thread,
 1152 and schedules Rx thread work. */
1153static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1154{
1155 struct net_device *dev = (struct net_device *)dev_instance;
1156 struct netdev_private *np = netdev_priv(dev);
1157 void __iomem *ioaddr = np->base;
1158 int hw_frame_id;
1159 int tx_cnt;
1160 int tx_status;
1161 int handled = 0;
1162
1163
1164 do {
1165 int intr_status = ioread16(ioaddr + IntrStatus);
1166 iowrite16(intr_status, ioaddr + IntrStatus);
1167
1168 if (netif_msg_intr(np))
1169 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1170 dev->name, intr_status);
1171
1172 if (!(intr_status & DEFAULT_INTR))
1173 break;
1174
1175 handled = 1;
1176
1177 if (intr_status & (IntrRxDMADone)) {
1178 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1179 ioaddr + IntrEnable);
1180 if (np->budget < 0)
1181 np->budget = RX_BUDGET;
1182 tasklet_schedule(&np->rx_tasklet);
1183 }
1184 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1185 tx_status = ioread16 (ioaddr + TxStatus);
1186 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1187 if (netif_msg_tx_done(np))
1188 printk
1189 ("%s: Transmit status is %2.2x.\n",
1190 dev->name, tx_status);
1191 if (tx_status & 0x1e) {
1192 np->stats.tx_errors++;
1193 if (tx_status & 0x10)
1194 np->stats.tx_fifo_errors++;
1195 if (tx_status & 0x08)
1196 np->stats.collisions++;
1197 if (tx_status & 0x02)
1198 np->stats.tx_window_errors++;
 1199 /* This reset has not been verified! */
1200 if (tx_status & 0x10) { /* Reset the Tx. */
1201 np->stats.tx_fifo_errors++;
1202 spin_lock(&np->lock);
1203 reset_tx(dev);
1204 spin_unlock(&np->lock);
1205 }
1206 if (tx_status & 0x1e) /* Restart the Tx. */
1207 iowrite16 (TxEnable,
1208 ioaddr + MACCtrl1);
1209 }
1210 /* Yup, this is a documentation bug. It cost me *hours*. */
1211 iowrite16 (0, ioaddr + TxStatus);
1212 if (tx_cnt < 0) {
1213 iowrite32(5000, ioaddr + DownCounter);
1214 break;
1215 }
1216 tx_status = ioread16 (ioaddr + TxStatus);
1217 }
1218 hw_frame_id = (tx_status >> 8) & 0xff;
1219 } else {
1220 hw_frame_id = ioread8(ioaddr + TxFrameId);
1221 }
1222
1223 if (np->pci_rev_id >= 0x14) {
1224 spin_lock(&np->lock);
1225 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1226 int entry = np->dirty_tx % TX_RING_SIZE;
1227 struct sk_buff *skb;
1228 int sw_frame_id;
1229 sw_frame_id = (le32_to_cpu(
1230 np->tx_ring[entry].status) >> 2) & 0xff;
1231 if (sw_frame_id == hw_frame_id &&
1232 !(le32_to_cpu(np->tx_ring[entry].status)
1233 & 0x00010000))
1234 break;
1235 if (sw_frame_id == (hw_frame_id + 1) %
1236 TX_RING_SIZE)
1237 break;
1238 skb = np->tx_skbuff[entry];
1239 /* Free the original skb. */
1240 pci_unmap_single(np->pci_dev,
1241 np->tx_ring[entry].frag[0].addr,
1242 skb->len, PCI_DMA_TODEVICE);
1243 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1244 np->tx_skbuff[entry] = NULL;
1245 np->tx_ring[entry].frag[0].addr = 0;
1246 np->tx_ring[entry].frag[0].length = 0;
1247 }
1248 spin_unlock(&np->lock);
1249 } else {
1250 spin_lock(&np->lock);
1251 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1252 int entry = np->dirty_tx % TX_RING_SIZE;
1253 struct sk_buff *skb;
1254 if (!(le32_to_cpu(np->tx_ring[entry].status)
1255 & 0x00010000))
1256 break;
1257 skb = np->tx_skbuff[entry];
1258 /* Free the original skb. */
1259 pci_unmap_single(np->pci_dev,
1260 np->tx_ring[entry].frag[0].addr,
1261 skb->len, PCI_DMA_TODEVICE);
1262 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1263 np->tx_skbuff[entry] = NULL;
1264 np->tx_ring[entry].frag[0].addr = 0;
1265 np->tx_ring[entry].frag[0].length = 0;
1266 }
1267 spin_unlock(&np->lock);
1268 }
1269
1270 if (netif_queue_stopped(dev) &&
1271 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1272 /* The ring is no longer full, clear busy flag. */
1273 netif_wake_queue (dev);
1274 }
1275 /* Abnormal error summary/uncommon events handlers. */
1276 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1277 netdev_error(dev, intr_status);
1278 } while (0);
1279 if (netif_msg_intr(np))
1280 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1281 dev->name, ioread16(ioaddr + IntrStatus));
1282 return IRQ_RETVAL(handled);
1283}
1284
1285static void rx_poll(unsigned long data)
1286{
1287 struct net_device *dev = (struct net_device *)data;
1288 struct netdev_private *np = netdev_priv(dev);
1289 int entry = np->cur_rx % RX_RING_SIZE;
1290 int boguscnt = np->budget;
1291 void __iomem *ioaddr = np->base;
1292 int received = 0;
1293
1294 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1295 while (1) {
1296 struct netdev_desc *desc = &(np->rx_ring[entry]);
1297 u32 frame_status = le32_to_cpu(desc->status);
1298 int pkt_len;
1299
1300 if (--boguscnt < 0) {
1301 goto not_done;
1302 }
1303 if (!(frame_status & DescOwn))
1304 break;
1305 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1306 if (netif_msg_rx_status(np))
1307 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1308 frame_status);
1309 if (frame_status & 0x001f4000) {
 1310 /* There was an error. */
1311 if (netif_msg_rx_err(np))
1312 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1313 frame_status);
1314 np->stats.rx_errors++;
1315 if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1316 if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1317 if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1318 if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1319 if (frame_status & 0x00100000) {
1320 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1321 " status %8.8x.\n",
1322 dev->name, frame_status);
1323 }
1324 } else {
1325 struct sk_buff *skb;
1326#ifndef final_version
1327 if (netif_msg_rx_status(np))
1328 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1329 ", bogus_cnt %d.\n",
1330 pkt_len, boguscnt);
1331#endif
1332 /* Check if the packet is long enough to accept without copying
1333 to a minimally-sized skbuff. */
1334 if (pkt_len < rx_copybreak
1335 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1336 skb->dev = dev;
1337 skb_reserve(skb, 2); /* 16 byte align the IP header */
1338 pci_dma_sync_single_for_cpu(np->pci_dev,
1339 desc->frag[0].addr,
1340 np->rx_buf_sz,
1341 PCI_DMA_FROMDEVICE);
1342
 1343 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1344 pci_dma_sync_single_for_device(np->pci_dev,
1345 desc->frag[0].addr,
1346 np->rx_buf_sz,
1347 PCI_DMA_FROMDEVICE);
1348 skb_put(skb, pkt_len);
1349 } else {
1350 pci_unmap_single(np->pci_dev,
1351 desc->frag[0].addr,
1352 np->rx_buf_sz,
1353 PCI_DMA_FROMDEVICE);
1354 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1355 np->rx_skbuff[entry] = NULL;
1356 }
1357 skb->protocol = eth_type_trans(skb, dev);
1358 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1359 netif_rx(skb);
1360 dev->last_rx = jiffies;
1361 }
1362 entry = (entry + 1) % RX_RING_SIZE;
1363 received++;
1364 }
1365 np->cur_rx = entry;
1366 refill_rx (dev);
1367 np->budget -= received;
1368 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1369 return;
1370
1371not_done:
1372 np->cur_rx = entry;
1373 refill_rx (dev);
1374 if (!received)
1375 received = 1;
1376 np->budget -= received;
1377 if (np->budget <= 0)
1378 np->budget = RX_BUDGET;
1379 tasklet_schedule(&np->rx_tasklet);
1380 return;
1381}
1382
1383static void refill_rx (struct net_device *dev)
1384{
1385 struct netdev_private *np = netdev_priv(dev);
1386 int entry;
1387 int cnt = 0;
1388
1389 /* Refill the Rx ring buffers. */
1390 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1391 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1392 struct sk_buff *skb;
1393 entry = np->dirty_rx % RX_RING_SIZE;
1394 if (np->rx_skbuff[entry] == NULL) {
1395 skb = dev_alloc_skb(np->rx_buf_sz);
1396 np->rx_skbuff[entry] = skb;
1397 if (skb == NULL)
1398 break; /* Better luck next round. */
1399 skb->dev = dev; /* Mark as being used by this device. */
1400 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1401 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
 1402 pci_map_single(np->pci_dev, skb->data,
1403 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1404 }
1405 /* Perhaps we need not reset this field. */
1406 np->rx_ring[entry].frag[0].length =
1407 cpu_to_le32(np->rx_buf_sz | LastFrag);
1408 np->rx_ring[entry].status = 0;
1409 cnt++;
1410 }
1411 return;
1412}
1413static void netdev_error(struct net_device *dev, int intr_status)
1414{
1415 struct netdev_private *np = netdev_priv(dev);
1416 void __iomem *ioaddr = np->base;
1417 u16 mii_ctl, mii_advertise, mii_lpa;
1418 int speed;
1419
1420 if (intr_status & LinkChange) {
1421 if (np->an_enable) {
1422 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
1423 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
1424 mii_advertise &= mii_lpa;
1425 printk (KERN_INFO "%s: Link changed: ", dev->name);
1426 if (mii_advertise & ADVERTISE_100FULL) {
1427 np->speed = 100;
1428 printk ("100Mbps, full duplex\n");
1429 } else if (mii_advertise & ADVERTISE_100HALF) {
1430 np->speed = 100;
1431 printk ("100Mbps, half duplex\n");
1432 } else if (mii_advertise & ADVERTISE_10FULL) {
1433 np->speed = 10;
1434 printk ("10Mbps, full duplex\n");
1435 } else if (mii_advertise & ADVERTISE_10HALF) {
1436 np->speed = 10;
1437 printk ("10Mbps, half duplex\n");
1438 } else
1439 printk ("\n");
1440
1441 } else {
1442 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
1443 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1444 np->speed = speed;
1445 printk (KERN_INFO "%s: Link changed: %dMbps ,",
1446 dev->name, speed);
1447 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1448 "full" : "half");
1449 }
1450 check_duplex (dev);
1451 if (np->flowctrl && np->mii_if.full_duplex) {
1452 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1453 ioaddr + MulticastFilter1+2);
1454 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1455 ioaddr + MACCtrl0);
1456 }
1457 }
1458 if (intr_status & StatsMax) {
1459 get_stats(dev);
1460 }
1461 if (intr_status & IntrPCIErr) {
1462 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1463 dev->name, intr_status);
1464 /* We must do a global reset of DMA to continue. */
1465 }
1466}
1467
1468static struct net_device_stats *get_stats(struct net_device *dev)
1469{
1470 struct netdev_private *np = netdev_priv(dev);
1471 void __iomem *ioaddr = np->base;
1472 int i;
1473
1474 /* We should lock this segment of code for SMP eventually, although
1475 the vulnerability window is very small and statistics are
1476 non-critical. */
 1477 /* The chip only needs to report frames silently dropped. */
1478 np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1479 np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1480 np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1481 np->stats.collisions += ioread8(ioaddr + StatsLateColl);
1482 np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1483 np->stats.collisions += ioread8(ioaddr + StatsOneColl);
1484 np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1485 ioread8(ioaddr + StatsTxDefer);
1486 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1487 ioread8(ioaddr + i);
1488 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1489 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1490 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1491 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1492
1493 return &np->stats;
1494}
1495
1496static void set_rx_mode(struct net_device *dev)
1497{
1498 struct netdev_private *np = netdev_priv(dev);
1499 void __iomem *ioaddr = np->base;
1500 u16 mc_filter[4]; /* Multicast hash filter */
1501 u32 rx_mode;
1502 int i;
1503
1504 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1505 /* Unconditionally log net taps. */
1506 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1507 memset(mc_filter, 0xff, sizeof(mc_filter));
1508 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1509 } else if ((dev->mc_count > multicast_filter_limit)
1510 || (dev->flags & IFF_ALLMULTI)) {
1511 /* Too many to match, or accept all multicasts. */
1512 memset(mc_filter, 0xff, sizeof(mc_filter));
1513 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1514 } else if (dev->mc_count) {
1515 struct dev_mc_list *mclist;
1516 int bit;
1517 int index;
1518 int crc;
1519 memset (mc_filter, 0, sizeof (mc_filter));
1520 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1521 i++, mclist = mclist->next) {
1522 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1523 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1524 if (crc & 0x80000000) index |= 1 << bit;
1525 mc_filter[index/16] |= (1 << (index % 16));
1526 }
1527 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1528 } else {
1529 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1530 return;
1531 }
1532 if (np->mii_if.full_duplex && np->flowctrl)
1533 mc_filter[3] |= 0x0200;
1534
1535 for (i = 0; i < 4; i++)
1536 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1537 iowrite8(rx_mode, ioaddr + RxMode);
1538}
1539
1540static int __set_mac_addr(struct net_device *dev)
1541{
1542 struct netdev_private *np = netdev_priv(dev);
1543 u16 addr16;
1544
1545 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1546 iowrite16(addr16, np->base + StationAddr);
1547 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1548 iowrite16(addr16, np->base + StationAddr+2);
1549 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1550 iowrite16(addr16, np->base + StationAddr+4);
1551 return 0;
1552}
1553
1554static int check_if_running(struct net_device *dev)
1555{
1556 if (!netif_running(dev))
1557 return -EINVAL;
1558 return 0;
1559}
1560
1561static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1562{
1563 struct netdev_private *np = netdev_priv(dev);
1564 strcpy(info->driver, DRV_NAME);
1565 strcpy(info->version, DRV_VERSION);
1566 strcpy(info->bus_info, pci_name(np->pci_dev));
1567}
1568
1569static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1570{
1571 struct netdev_private *np = netdev_priv(dev);
1572 spin_lock_irq(&np->lock);
1573 mii_ethtool_gset(&np->mii_if, ecmd);
1574 spin_unlock_irq(&np->lock);
1575 return 0;
1576}
1577
1578static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1579{
1580 struct netdev_private *np = netdev_priv(dev);
1581 int res;
1582 spin_lock_irq(&np->lock);
1583 res = mii_ethtool_sset(&np->mii_if, ecmd);
1584 spin_unlock_irq(&np->lock);
1585 return res;
1586}
1587
1588static int nway_reset(struct net_device *dev)
1589{
1590 struct netdev_private *np = netdev_priv(dev);
1591 return mii_nway_restart(&np->mii_if);
1592}
1593
1594static u32 get_link(struct net_device *dev)
1595{
1596 struct netdev_private *np = netdev_priv(dev);
1597 return mii_link_ok(&np->mii_if);
1598}
1599
1600static u32 get_msglevel(struct net_device *dev)
1601{
1602 struct netdev_private *np = netdev_priv(dev);
1603 return np->msg_enable;
1604}
1605
1606static void set_msglevel(struct net_device *dev, u32 val)
1607{
1608 struct netdev_private *np = netdev_priv(dev);
1609 np->msg_enable = val;
1610}
1611
1612static struct ethtool_ops ethtool_ops = {
1613 .begin = check_if_running,
1614 .get_drvinfo = get_drvinfo,
1615 .get_settings = get_settings,
1616 .set_settings = set_settings,
1617 .nway_reset = nway_reset,
1618 .get_link = get_link,
1619 .get_msglevel = get_msglevel,
1620 .set_msglevel = set_msglevel,
 1621 .get_perm_addr = ethtool_op_get_perm_addr,
1622};
1623
1624static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1625{
1626 struct netdev_private *np = netdev_priv(dev);
1627 void __iomem *ioaddr = np->base;
1628 int rc;
1629 int i;
1630
1631 if (!netif_running(dev))
1632 return -EINVAL;
1633
1634 spin_lock_irq(&np->lock);
1635 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1636 spin_unlock_irq(&np->lock);
1637 switch (cmd) {
1638 case SIOCDEVPRIVATE:
1639 for (i=0; i<TX_RING_SIZE; i++) {
1640 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
1641 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
1642 le32_to_cpu(np->tx_ring[i].next_desc),
1643 le32_to_cpu(np->tx_ring[i].status),
1644 (le32_to_cpu(np->tx_ring[i].status) >> 2)
1645 & 0xff,
1646 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1647 le32_to_cpu(np->tx_ring[i].frag[0].length));
1648 }
1649 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1650 ioread32(np->base + TxListPtr),
1651 netif_queue_stopped(dev));
1652 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1653 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1654 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1655 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1656 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1657 printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
1658 return 0;
1659 }
1660
1661
1662 return rc;
1663}
1664
1665static int netdev_close(struct net_device *dev)
1666{
1667 struct netdev_private *np = netdev_priv(dev);
1668 void __iomem *ioaddr = np->base;
1669 struct sk_buff *skb;
1670 int i;
1671
1672 netif_stop_queue(dev);
1673
1674 if (netif_msg_ifdown(np)) {
1675 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1676 "Rx %4.4x Int %2.2x.\n",
1677 dev->name, ioread8(ioaddr + TxStatus),
1678 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1679 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1680 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1681 }
1682
1683 /* Disable interrupts by clearing the interrupt mask. */
1684 iowrite16(0x0000, ioaddr + IntrEnable);
1685
1686 /* Stop the chip's Tx and Rx processes. */
1687 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1688
1689 /* Wait and kill tasklet */
1690 tasklet_kill(&np->rx_tasklet);
1691 tasklet_kill(&np->tx_tasklet);
1692
1693#ifdef __i386__
1694 if (netif_msg_hw(np)) {
1695 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
1696 (int)(np->tx_ring_dma));
1697 for (i = 0; i < TX_RING_SIZE; i++)
1698 printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1699 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1700 np->tx_ring[i].frag[0].length);
1701 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1702 (int)(np->rx_ring_dma));
1703 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1704 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1705 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1706 np->rx_ring[i].frag[0].length);
1707 }
1708 }
1709#endif /* __i386__ debugging only */
1710
1711 free_irq(dev->irq, dev);
1712
1713 del_timer_sync(&np->timer);
1714
1715 /* Free all the skbuffs in the Rx queue. */
1716 for (i = 0; i < RX_RING_SIZE; i++) {
1717 np->rx_ring[i].status = 0;
1718 np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
1719 skb = np->rx_skbuff[i];
1720 if (skb) {
1721 pci_unmap_single(np->pci_dev,
1722 np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
1723 PCI_DMA_FROMDEVICE);
1724 dev_kfree_skb(skb);
1725 np->rx_skbuff[i] = NULL;
1726 }
1727 }
1728 for (i = 0; i < TX_RING_SIZE; i++) {
1729 skb = np->tx_skbuff[i];
1730 if (skb) {
1731 pci_unmap_single(np->pci_dev,
1732 np->tx_ring[i].frag[0].addr, skb->len,
1733 PCI_DMA_TODEVICE);
1734 dev_kfree_skb(skb);
1735 np->tx_skbuff[i] = NULL;
1736 }
1737 }
1738
1739 return 0;
1740}
1741
1742static void __devexit sundance_remove1 (struct pci_dev *pdev)
1743{
1744 struct net_device *dev = pci_get_drvdata(pdev);
1745
1746 if (dev) {
1747 struct netdev_private *np = netdev_priv(dev);
1748
1749 unregister_netdev(dev);
1750 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1751 np->rx_ring_dma);
1752 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1753 np->tx_ring_dma);
1754 pci_iounmap(pdev, np->base);
1755 pci_release_regions(pdev);
1756 free_netdev(dev);
1757 pci_set_drvdata(pdev, NULL);
1758 }
1759}
1760
1761static struct pci_driver sundance_driver = {
1762 .name = DRV_NAME,
1763 .id_table = sundance_pci_tbl,
1764 .probe = sundance_probe1,
1765 .remove = __devexit_p(sundance_remove1),
1766};
1767
1768static int __init sundance_init(void)
1769{
1770/* when a module, this is printed whether or not devices are found in probe */
1771#ifdef MODULE
1772 printk(version);
1773#endif
1774 return pci_module_init(&sundance_driver);
1775}
1776
1777static void __exit sundance_exit(void)
1778{
1779 pci_unregister_driver(&sundance_driver);
1780}
1781
1782module_init(sundance_init);
1783module_exit(sundance_exit);
1784
1785