1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 Written 1999-2000 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
22
23 */
24
25 #define DRV_NAME "sundance"
26 #define DRV_VERSION "1.2"
27 #define DRV_RELDATE "11-Sep-2006"
28
29
30 /* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 Typical is a 64 element hash table based on the Ethernet CRC. */
35 static const int multicast_filter_limit = 32;
36
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
40 need a copy-align. */
41 static int rx_copybreak;
42 static int flowctrl=1;
43
44 /* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
51 1 10Mbps half duplex.
52 2 10Mbps full duplex.
53 3 100Mbps half duplex.
54 4 100Mbps full duplex.
55 */
56 #define MAX_UNITS 8
57 static char *media[MAX_UNITS];
58
59
60 /* Operational parameters that are set at compile time. */
61
62 /* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
66 Tx error recovery.
67 Large receive rings merely waste memory. */
68 #define TX_RING_SIZE 32
69 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70 #define RX_RING_SIZE 64
71 #define RX_BUDGET 32
72 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
73 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
74
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT (4*HZ)
78 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
79
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/interrupt.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/init.h>
93 #include <linux/bitops.h>
94 #include <asm/uaccess.h>
95 #include <asm/processor.h> /* Processor type for cache alignment. */
96 #include <asm/io.h>
97 #include <linux/delay.h>
98 #include <linux/spinlock.h>
99 #ifndef _COMPAT_WITH_OLD_KERNEL
100 #include <linux/crc32.h>
101 #include <linux/ethtool.h>
102 #include <linux/mii.h>
103 #else
104 #include "crc32.h"
105 #include "ethtool.h"
106 #include "mii.h"
107 #include "compat.h"
108 #endif
109
110 /* These identify the driver base version and may not be removed. */
111 static const char version[] __devinitconst =
112 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
113 " Written by Donald Becker\n";
114
115 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
116 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
117 MODULE_LICENSE("GPL");
118
119 module_param(debug, int, 0);
120 module_param(rx_copybreak, int, 0);
121 module_param_array(media, charp, NULL, 0);
122 module_param(flowctrl, int, 0);
123 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
124 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
125 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
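/* Example module load (illustrative values only):
     modprobe sundance debug=2 flowctrl=1 media=100mbps_fd
   Units without a media= entry default to autonegotiation. */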
126
127 /*
128 Theory of Operation
129
130 I. Board Compatibility
131
132 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
133
134 II. Board-specific settings
135
136 III. Driver operation
137
138 IIIa. Ring buffers
139
140 This driver uses two statically allocated fixed-size descriptor lists
141 formed into rings by a branch from the final descriptor to the beginning of
142 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
143 Some chips explicitly use only 2^N sized rings, while others use a
144 'next descriptor' pointer that the driver forms into rings.
145
146 IIIb/c. Transmit/Receive Structure
147
148 This driver uses a zero-copy receive and transmit scheme.
149 The driver allocates full frame size skbuffs for the Rx ring buffers at
150 open() time and passes the skb->data field to the chip as receive data
151 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
152 a fresh skbuff is allocated and the frame is copied to the new skbuff.
153 When the incoming frame is larger, the skbuff is passed directly up the
154 protocol stack. Buffers consumed this way are replaced by newly allocated
155 skbuffs in a later phase of receives.
156
157 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
158 using a full-sized skbuff for small frames vs. the copying costs of larger
159 frames. New boards are typically used in generously configured machines
160 and the underfilled buffers have negligible impact compared to the benefit of
161 a single allocation size, so the default value of zero results in never
162 copying packets. When copying is done, the cost is usually mitigated by using
163 a combined copy/checksum routine. Copying also preloads the cache, which is
164 most useful with small frames.
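(In this driver the choice is made by the "pkt_len < rx_copybreak" test in
rx_poll() below.)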
165
166 A subtle aspect of the operation is that the IP header at offset 14 in an
167 ethernet frame isn't longword aligned for further processing.
168 Unaligned buffers are permitted by the Sundance hardware, so
169 frames are received into the skbuff at an offset of "+2", 16-byte aligning
170 the IP header.
171
172 IIId. Synchronization
173
174 The driver runs as two independent, single-threaded flows of control. One
175 is the send-packet routine, which enforces single-threaded use by the
176 dev->tbusy flag. The other thread is the interrupt handler, which is single
177 threaded by the hardware and interrupt handling software.
178
179 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
180 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
181 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
182 the 'lp->tx_full' flag.
183
184 The interrupt handler has exclusive control over the Rx ring and records stats
185 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
186 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
187 clears both the tx_full and tbusy flags.
188
189 IV. Notes
190
191 IVb. References
192
193 The Sundance ST201 datasheet, preliminary version.
194 The Kendin KS8723 datasheet, preliminary version.
195 The ICplus IP100 datasheet, preliminary version.
196 http://www.scyld.com/expert/100mbps.html
197 http://www.scyld.com/expert/NWay.html
198
199 IVc. Errata
200
201 */
202
203 /* Work-around for Kendin chip bugs. */
204 #ifndef CONFIG_SUNDANCE_MMIO
205 #define USE_IO_OPS 1
206 #endif
207
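/* Each entry is { vendor, device, subvendor, subdevice, class, class_mask,
   driver_data }; driver_data is the index into pci_id_tbl[] below. */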
208 static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
209 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
210 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
211 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
212 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
213 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
214 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
215 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
216 { }
217 };
218 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
219
220 enum {
221 netdev_io_size = 128
222 };
223
224 struct pci_id_info {
225 const char *name;
226 };
227 static const struct pci_id_info pci_id_tbl[] __devinitdata = {
228 {"D-Link DFE-550TX FAST Ethernet Adapter"},
229 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
230 {"D-Link DFE-580TX 4 port Server Adapter"},
231 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
232 {"D-Link DL10050-based FAST Ethernet Adapter"},
233 {"Sundance Technology Alta"},
234 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
235 { } /* terminate list. */
236 };
237
238 /* This driver was written to use PCI memory space; however, x86-oriented
239 hardware often uses I/O space accesses. */
240
241 /* Offsets to the device registers.
242 Unlike software-only systems, device drivers interact with complex hardware.
243 It's not useful to define symbolic names for every register bit in the
244 device. Symbolic names can only partially document the semantics while
245 making the driver longer and more difficult to read.
246 In general, only the important configuration values or bits changed
247 multiple times should be defined symbolically.
248 */
249 enum alta_offsets {
250 DMACtrl = 0x00,
251 TxListPtr = 0x04,
252 TxDMABurstThresh = 0x08,
253 TxDMAUrgentThresh = 0x09,
254 TxDMAPollPeriod = 0x0a,
255 RxDMAStatus = 0x0c,
256 RxListPtr = 0x10,
257 DebugCtrl0 = 0x1a,
258 DebugCtrl1 = 0x1c,
259 RxDMABurstThresh = 0x14,
260 RxDMAUrgentThresh = 0x15,
261 RxDMAPollPeriod = 0x16,
262 LEDCtrl = 0x1a,
263 ASICCtrl = 0x30,
264 EEData = 0x34,
265 EECtrl = 0x36,
266 FlashAddr = 0x40,
267 FlashData = 0x44,
268 TxStatus = 0x46,
269 TxFrameId = 0x47,
270 DownCounter = 0x18,
271 IntrClear = 0x4a,
272 IntrEnable = 0x4c,
273 IntrStatus = 0x4e,
274 MACCtrl0 = 0x50,
275 MACCtrl1 = 0x52,
276 StationAddr = 0x54,
277 MaxFrameSize = 0x5A,
278 RxMode = 0x5c,
279 MIICtrl = 0x5e,
280 MulticastFilter0 = 0x60,
281 MulticastFilter1 = 0x64,
282 RxOctetsLow = 0x68,
283 RxOctetsHigh = 0x6a,
284 TxOctetsLow = 0x6c,
285 TxOctetsHigh = 0x6e,
286 TxFramesOK = 0x70,
287 RxFramesOK = 0x72,
288 StatsCarrierError = 0x74,
289 StatsLateColl = 0x75,
290 StatsMultiColl = 0x76,
291 StatsOneColl = 0x77,
292 StatsTxDefer = 0x78,
293 RxMissed = 0x79,
294 StatsTxXSDefer = 0x7a,
295 StatsTxAbort = 0x7b,
296 StatsBcastTx = 0x7c,
297 StatsBcastRx = 0x7d,
298 StatsMcastTx = 0x7e,
299 StatsMcastRx = 0x7f,
300 /* Aliased and bogus values! */
301 RxStatus = 0x0c,
302 };
303 enum ASICCtrl_HiWord_bit {
304 GlobalReset = 0x0001,
305 RxReset = 0x0002,
306 TxReset = 0x0004,
307 DMAReset = 0x0008,
308 FIFOReset = 0x0010,
309 NetworkReset = 0x0020,
310 HostReset = 0x0040,
311 ResetBusy = 0x0400,
312 };
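/* These bits live in the high word of the 32-bit ASICCtrl register, hence the
   "<< 16" shifts at the sundance_reset() call sites below. */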
313
314 /* Bits in the interrupt status/mask registers. */
315 enum intr_status_bits {
316 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
317 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
318 IntrDrvRqst=0x0040,
319 StatsMax=0x0080, LinkChange=0x0100,
320 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
321 };
322
323 /* Bits in the RxMode register. */
324 enum rx_mode_bits {
325 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
326 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
327 };
328 /* Bits in MACCtrl. */
329 enum mac_ctrl0_bits {
330 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
331 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
332 };
333 enum mac_ctrl1_bits {
334 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
335 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
336 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
337 };
338
339 /* The Rx and Tx buffer descriptors. */
340 /* Note that using only 32 bit fields simplifies conversion to big-endian
341 architectures. */
342 struct netdev_desc {
343 __le32 next_desc;
344 __le32 status;
345 struct desc_frag { __le32 addr, length; } frag[1];
346 };
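/* next_desc holds the bus address of the next descriptor in the ring; frag[0]
   holds the buffer's bus address and length, with LastFrag or'ed into the
   length to mark the final fragment. */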
347
348 /* Bits in netdev_desc.status */
349 enum desc_status_bits {
350 DescOwn=0x8000,
351 DescEndPacket=0x4000,
352 DescEndRing=0x2000,
353 LastFrag=0x80000000,
354 DescIntrOnTx=0x8000,
355 DescIntrOnDMADone=0x80000000,
356 DisableAlign = 0x00000001,
357 };
358
359 #define PRIV_ALIGN 15 /* Required alignment mask */
360 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
361 within the structure. */
362 #define MII_CNT 4
363 struct netdev_private {
364 /* Descriptor rings first for alignment. */
365 struct netdev_desc *rx_ring;
366 struct netdev_desc *tx_ring;
367 struct sk_buff* rx_skbuff[RX_RING_SIZE];
368 struct sk_buff* tx_skbuff[TX_RING_SIZE];
369 dma_addr_t tx_ring_dma;
370 dma_addr_t rx_ring_dma;
371 struct timer_list timer; /* Media monitoring timer. */
372 /* Frequently used values: keep some adjacent for cache effect. */
373 spinlock_t lock;
374 spinlock_t rx_lock; /* Group with Tx control cache line. */
375 int msg_enable;
376 int chip_id;
377 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
378 unsigned int rx_buf_sz; /* Based on MTU+slack. */
379 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
380 unsigned int cur_tx, dirty_tx;
381 /* These values keep track of the transceiver/media in use. */
382 unsigned int flowctrl:1;
383 unsigned int default_port:4; /* Last dev->if_port value. */
384 unsigned int an_enable:1;
385 unsigned int speed;
386 struct tasklet_struct rx_tasklet;
387 struct tasklet_struct tx_tasklet;
388 int budget;
389 int cur_task;
390 /* Multicast and receive mode. */
391 spinlock_t mcastlock; /* SMP lock multicast updates. */
392 u16 mcast_filter[4];
393 /* MII transceiver section. */
394 struct mii_if_info mii_if;
395 int mii_preamble_required;
396 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
397 struct pci_dev *pci_dev;
398 void __iomem *base;
399 };
400
401 /* The station address location in the EEPROM. */
402 #define EEPROM_SA_OFFSET 0x10
403 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
404 IntrDrvRqst | IntrTxDone | StatsMax | \
405 LinkChange)
406
407 static int change_mtu(struct net_device *dev, int new_mtu);
408 static int eeprom_read(void __iomem *ioaddr, int location);
409 static int mdio_read(struct net_device *dev, int phy_id, int location);
410 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
411 static int mdio_wait_link(struct net_device *dev, int wait);
412 static int netdev_open(struct net_device *dev);
413 static void check_duplex(struct net_device *dev);
414 static void netdev_timer(unsigned long data);
415 static void tx_timeout(struct net_device *dev);
416 static void init_ring(struct net_device *dev);
417 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
418 static int reset_tx (struct net_device *dev);
419 static irqreturn_t intr_handler(int irq, void *dev_instance);
420 static void rx_poll(unsigned long data);
421 static void tx_poll(unsigned long data);
422 static void refill_rx (struct net_device *dev);
423 static void netdev_error(struct net_device *dev, int intr_status);
425 static void set_rx_mode(struct net_device *dev);
426 static int __set_mac_addr(struct net_device *dev);
427 static struct net_device_stats *get_stats(struct net_device *dev);
428 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
429 static int netdev_close(struct net_device *dev);
430 static const struct ethtool_ops ethtool_ops;
431
432 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
433 {
434 struct netdev_private *np = netdev_priv(dev);
435 void __iomem *ioaddr = np->base + ASICCtrl;
436 int countdown;
437
438 /* ST201 documentation states ASICCtrl is a 32-bit register */
439 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
440 /* ST201 documentation states reset can take up to 1 ms */
441 countdown = 10 + 1;
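/* Ten 100 usec polls, roughly the 1 ms maximum quoted above. */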
442 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
443 if (--countdown == 0) {
444 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
445 break;
446 }
447 udelay(100);
448 }
449 }
450
451 static const struct net_device_ops netdev_ops = {
452 .ndo_open = netdev_open,
453 .ndo_stop = netdev_close,
454 .ndo_start_xmit = start_tx,
455 .ndo_get_stats = get_stats,
456 .ndo_set_multicast_list = set_rx_mode,
457 .ndo_do_ioctl = netdev_ioctl,
458 .ndo_tx_timeout = tx_timeout,
459 .ndo_change_mtu = change_mtu,
460 .ndo_set_mac_address = eth_mac_addr,
461 .ndo_validate_addr = eth_validate_addr,
462 };
463
464 static int __devinit sundance_probe1 (struct pci_dev *pdev,
465 const struct pci_device_id *ent)
466 {
467 struct net_device *dev;
468 struct netdev_private *np;
469 static int card_idx;
470 int chip_idx = ent->driver_data;
471 int irq;
472 int i;
473 void __iomem *ioaddr;
474 u16 mii_ctl;
475 void *ring_space;
476 dma_addr_t ring_dma;
477 #ifdef USE_IO_OPS
478 int bar = 0;
479 #else
480 int bar = 1;
481 #endif
482 int phy, phy_end, phy_idx = 0;
483
484 /* when built into the kernel, we only print version if device is found */
485 #ifndef MODULE
486 static int printed_version;
487 if (!printed_version++)
488 printk(version);
489 #endif
490
491 if (pci_enable_device(pdev))
492 return -EIO;
493 pci_set_master(pdev);
494
495 irq = pdev->irq;
496
497 dev = alloc_etherdev(sizeof(*np));
498 if (!dev)
499 return -ENOMEM;
500 SET_NETDEV_DEV(dev, &pdev->dev);
501
502 if (pci_request_regions(pdev, DRV_NAME))
503 goto err_out_netdev;
504
505 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
506 if (!ioaddr)
507 goto err_out_res;
508
509 for (i = 0; i < 3; i++)
510 ((__le16 *)dev->dev_addr)[i] =
511 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
512 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
513
514 dev->base_addr = (unsigned long)ioaddr;
515 dev->irq = irq;
516
517 np = netdev_priv(dev);
518 np->base = ioaddr;
519 np->pci_dev = pdev;
520 np->chip_id = chip_idx;
521 np->msg_enable = (1 << debug) - 1;
522 spin_lock_init(&np->lock);
523 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
524 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
525
526 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
527 if (!ring_space)
528 goto err_out_cleardev;
529 np->tx_ring = (struct netdev_desc *)ring_space;
530 np->tx_ring_dma = ring_dma;
531
532 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
533 if (!ring_space)
534 goto err_out_unmap_tx;
535 np->rx_ring = (struct netdev_desc *)ring_space;
536 np->rx_ring_dma = ring_dma;
537
538 np->mii_if.dev = dev;
539 np->mii_if.mdio_read = mdio_read;
540 np->mii_if.mdio_write = mdio_write;
541 np->mii_if.phy_id_mask = 0x1f;
542 np->mii_if.reg_num_mask = 0x1f;
543
544 /* The chip-specific entries in the device structure. */
545 dev->netdev_ops = &netdev_ops;
546 SET_ETHTOOL_OPS(dev, &ethtool_ops);
547 dev->watchdog_timeo = TX_TIMEOUT;
548
549 pci_set_drvdata(pdev, dev);
550
551 i = register_netdev(dev);
552 if (i)
553 goto err_out_unmap_rx;
554
555 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
556 dev->name, pci_id_tbl[chip_idx].name, ioaddr,
557 dev->dev_addr, irq);
558
559 np->phys[0] = 1; /* Default setting */
560 np->mii_preamble_required++;
561
562 /*
563 * It seems some PHYs don't deal well with address 0 being accessed
564 * first
565 */
566 if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
567 phy = 0;
568 phy_end = 31;
569 } else {
570 phy = 1;
571 phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
572 }
573 for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
574 int phyx = phy & 0x1f;
575 int mii_status = mdio_read(dev, phyx, MII_BMSR);
576 if (mii_status != 0xffff && mii_status != 0x0000) {
577 np->phys[phy_idx++] = phyx;
578 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
579 if ((mii_status & 0x0040) == 0)
580 np->mii_preamble_required++;
581 printk(KERN_INFO "%s: MII PHY found at address %d, status "
582 "0x%4.4x advertising %4.4x.\n",
583 dev->name, phyx, mii_status, np->mii_if.advertising);
584 }
585 }
586 np->mii_preamble_required--;
587
588 if (phy_idx == 0) {
589 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
590 dev->name, ioread32(ioaddr + ASICCtrl));
591 goto err_out_unregister;
592 }
593
594 np->mii_if.phy_id = np->phys[0];
595
596 /* Parse override configuration */
597 np->an_enable = 1;
598 if (card_idx < MAX_UNITS) {
599 if (media[card_idx] != NULL) {
600 np->an_enable = 0;
601 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
602 strcmp (media[card_idx], "4") == 0) {
603 np->speed = 100;
604 np->mii_if.full_duplex = 1;
605 } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
606 strcmp (media[card_idx], "3") == 0) {
607 np->speed = 100;
608 np->mii_if.full_duplex = 0;
609 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
610 strcmp (media[card_idx], "2") == 0) {
611 np->speed = 10;
612 np->mii_if.full_duplex = 1;
613 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
614 strcmp (media[card_idx], "1") == 0) {
615 np->speed = 10;
616 np->mii_if.full_duplex = 0;
617 } else {
618 np->an_enable = 1;
619 }
620 }
621 if (flowctrl == 1)
622 np->flowctrl = 1;
623 }
624
625 /* Fibre PHY? */
626 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
627 /* Default 100Mbps Full */
628 if (np->an_enable) {
629 np->speed = 100;
630 np->mii_if.full_duplex = 1;
631 np->an_enable = 0;
632 }
633 }
634 /* Reset PHY */
635 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
636 mdelay (300);
637 /* If flow control is enabled, we need to advertise it. */
638 if (np->flowctrl)
639 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
640 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
641 /* Force media type */
642 if (!np->an_enable) {
643 mii_ctl = 0;
644 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
645 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
646 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
647 printk (KERN_INFO "Override speed=%d, %s duplex\n",
648 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
649
650 }
651
652 /* Perhaps move the reset here? */
653 /* Reset the chip to erase previous misconfiguration. */
654 if (netif_msg_hw(np))
655 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
656 sundance_reset(dev, 0x00ff << 16);
657 if (netif_msg_hw(np))
658 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
659
660 card_idx++;
661 return 0;
662
663 err_out_unregister:
664 unregister_netdev(dev);
665 err_out_unmap_rx:
666 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
667 err_out_unmap_tx:
668 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
669 err_out_cleardev:
670 pci_set_drvdata(pdev, NULL);
671 pci_iounmap(pdev, ioaddr);
672 err_out_res:
673 pci_release_regions(pdev);
674 err_out_netdev:
675 free_netdev (dev);
676 return -ENODEV;
677 }
678
679 static int change_mtu(struct net_device *dev, int new_mtu)
680 {
681 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
682 return -EINVAL;
683 if (netif_running(dev))
684 return -EBUSY;
685 dev->mtu = new_mtu;
686 return 0;
687 }
688
689 #define eeprom_delay(ee_addr) ioread32(ee_addr)
690 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
691 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
692 {
693 int boguscnt = 10000; /* Typical 1900 ticks. */
694 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
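/* The low byte is the word address; 0x0200 presumably selects the read
   opcode, and bit 0x8000 (polled below) is treated as the busy flag. */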
695 do {
696 eeprom_delay(ioaddr + EECtrl);
697 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
698 return ioread16(ioaddr + EEData);
699 }
700 } while (--boguscnt > 0);
701 return 0;
702 }
703
704 /* MII transceiver control section.
705 Read and write the MII registers using software-generated serial
706 MDIO protocol. See the MII specifications or DP83840A data sheet
707 for details.
708
709 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
710 met by back-to-back 33 MHz PCI cycles. */
711 #define mdio_delay() ioread8(mdio_addr)
712
713 enum mii_reg_bits {
714 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
715 };
716 #define MDIO_EnbIn (0)
717 #define MDIO_WRITE0 (MDIO_EnbOutput)
718 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
719
720 /* Generate the preamble required for initial synchronization and
721 a few older transceivers. */
722 static void mdio_sync(void __iomem *mdio_addr)
723 {
724 int bits = 32;
725
726 /* Establish sync by sending at least 32 logic ones. */
727 while (--bits >= 0) {
728 iowrite8(MDIO_WRITE1, mdio_addr);
729 mdio_delay();
730 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
731 mdio_delay();
732 }
733 }
734
735 static int mdio_read(struct net_device *dev, int phy_id, int location)
736 {
737 struct netdev_private *np = netdev_priv(dev);
738 void __iomem *mdio_addr = np->base + MIICtrl;
739 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
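/* Shifted out MSB first: two leading ones, start (01), read opcode (10),
   5-bit PHY address, 5-bit register address. */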
740 int i, retval = 0;
741
742 if (np->mii_preamble_required)
743 mdio_sync(mdio_addr);
744
745 /* Shift the read command bits out. */
746 for (i = 15; i >= 0; i--) {
747 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
748
749 iowrite8(dataval, mdio_addr);
750 mdio_delay();
751 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
752 mdio_delay();
753 }
754 /* Read the two transition, 16 data, and wire-idle bits. */
755 for (i = 19; i > 0; i--) {
756 iowrite8(MDIO_EnbIn, mdio_addr);
757 mdio_delay();
758 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
759 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
760 mdio_delay();
761 }
762 return (retval>>1) & 0xffff;
763 }
764
765 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
766 {
767 struct netdev_private *np = netdev_priv(dev);
768 void __iomem *mdio_addr = np->base + MIICtrl;
769 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
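/* Shifted out MSB first: start (01), write opcode (01), 5-bit PHY address,
   5-bit register address, turnaround (10), then 16 data bits. */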
770 int i;
771
772 if (np->mii_preamble_required)
773 mdio_sync(mdio_addr);
774
775 /* Shift the command bits out. */
776 for (i = 31; i >= 0; i--) {
777 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
778
779 iowrite8(dataval, mdio_addr);
780 mdio_delay();
781 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
782 mdio_delay();
783 }
784 /* Clear out extra bits. */
785 for (i = 2; i > 0; i--) {
786 iowrite8(MDIO_EnbIn, mdio_addr);
787 mdio_delay();
788 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
789 mdio_delay();
790 }
791 return;
792 }
793
794 static int mdio_wait_link(struct net_device *dev, int wait)
795 {
796 int bmsr;
797 int phy_id;
798 struct netdev_private *np;
799
800 np = netdev_priv(dev);
801 phy_id = np->phys[0];
802
803 do {
804 bmsr = mdio_read(dev, phy_id, MII_BMSR);
805 if (bmsr & 0x0004)
806 return 0;
807 mdelay(1);
808 } while (--wait > 0);
809 return -1;
810 }
811
812 static int netdev_open(struct net_device *dev)
813 {
814 struct netdev_private *np = netdev_priv(dev);
815 void __iomem *ioaddr = np->base;
816 unsigned long flags;
817 int i;
818
819 /* Do we need to reset the chip??? */
820
821 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
822 if (i)
823 return i;
824
825 if (netif_msg_ifup(np))
826 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
827 dev->name, dev->irq);
828 init_ring(dev);
829
830 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
831 /* The Tx list pointer is written as packets are queued. */
832
833 /* Initialize other registers. */
834 __set_mac_addr(dev);
835 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
836 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
837 #else
838 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
839 #endif
840 if (dev->mtu > 2047)
841 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
842
843 /* Configure the PCI bus bursts and FIFO thresholds. */
844
845 if (dev->if_port == 0)
846 dev->if_port = np->default_port;
847
848 spin_lock_init(&np->mcastlock);
849
850 set_rx_mode(dev);
851 iowrite16(0, ioaddr + IntrEnable);
852 iowrite16(0, ioaddr + DownCounter);
853 /* Set the chip to poll every N*320nsec. */
854 iowrite8(100, ioaddr + RxDMAPollPeriod);
855 iowrite8(127, ioaddr + TxDMAPollPeriod);
856 /* Fix DFE-580TX packet drop issue */
857 if (np->pci_dev->revision >= 0x14)
858 iowrite8(0x01, ioaddr + DebugCtrl1);
859 netif_start_queue(dev);
860
861 spin_lock_irqsave(&np->lock, flags);
862 reset_tx(dev);
863 spin_unlock_irqrestore(&np->lock, flags);
864
865 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
866
867 if (netif_msg_ifup(np))
868 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
869 "MAC Control %x, %4.4x %4.4x.\n",
870 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
871 ioread32(ioaddr + MACCtrl0),
872 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
873
874 /* Set the timer to check for link beat. */
875 init_timer(&np->timer);
876 np->timer.expires = jiffies + 3*HZ;
877 np->timer.data = (unsigned long)dev;
878 np->timer.function = &netdev_timer; /* timer handler */
879 add_timer(&np->timer);
880
881 /* Enable interrupts by setting the interrupt mask. */
882 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
883
884 return 0;
885 }
886
887 static void check_duplex(struct net_device *dev)
888 {
889 struct netdev_private *np = netdev_priv(dev);
890 void __iomem *ioaddr = np->base;
891 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
892 int negotiated = mii_lpa & np->mii_if.advertising;
893 int duplex;
894
895 /* Force media */
896 if (!np->an_enable || mii_lpa == 0xffff) {
897 if (np->mii_if.full_duplex)
898 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
899 ioaddr + MACCtrl0);
900 return;
901 }
902
903 /* Autonegotiation */
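/* Full duplex if the partner advertises 100BASE-TX full duplex (0x0100), or
   advertises 10BASE-T full duplex (0x0040) and nothing faster. */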
904 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
905 if (np->mii_if.full_duplex != duplex) {
906 np->mii_if.full_duplex = duplex;
907 if (netif_msg_link(np))
908 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
909 "negotiated capability %4.4x.\n", dev->name,
910 duplex ? "full" : "half", np->phys[0], negotiated);
911 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
912 }
913 }
914
915 static void netdev_timer(unsigned long data)
916 {
917 struct net_device *dev = (struct net_device *)data;
918 struct netdev_private *np = netdev_priv(dev);
919 void __iomem *ioaddr = np->base;
920 int next_tick = 10*HZ;
921
922 if (netif_msg_timer(np)) {
923 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
924 "Tx %x Rx %x.\n",
925 dev->name, ioread16(ioaddr + IntrEnable),
926 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
927 }
928 check_duplex(dev);
929 np->timer.expires = jiffies + next_tick;
930 add_timer(&np->timer);
931 }
932
933 static void tx_timeout(struct net_device *dev)
934 {
935 struct netdev_private *np = netdev_priv(dev);
936 void __iomem *ioaddr = np->base;
937 unsigned long flag;
938
939 netif_stop_queue(dev);
940 tasklet_disable(&np->tx_tasklet);
941 iowrite16(0, ioaddr + IntrEnable);
942 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
943 "TxFrameId %2.2x,"
944 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
945 ioread8(ioaddr + TxFrameId));
946
947 {
948 int i;
949 for (i=0; i<TX_RING_SIZE; i++) {
950 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
951 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
952 le32_to_cpu(np->tx_ring[i].next_desc),
953 le32_to_cpu(np->tx_ring[i].status),
954 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
955 le32_to_cpu(np->tx_ring[i].frag[0].addr),
956 le32_to_cpu(np->tx_ring[i].frag[0].length));
957 }
958 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
959 ioread32(np->base + TxListPtr),
960 netif_queue_stopped(dev));
961 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
962 np->cur_tx, np->cur_tx % TX_RING_SIZE,
963 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
964 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
965 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
966 }
967 spin_lock_irqsave(&np->lock, flag);
968
969 /* Stop and restart the chip's Tx processes . */
970 reset_tx(dev);
971 spin_unlock_irqrestore(&np->lock, flag);
972
973 dev->if_port = 0;
974
975 dev->trans_start = jiffies;
976 dev->stats.tx_errors++;
977 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
978 netif_wake_queue(dev);
979 }
980 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
981 tasklet_enable(&np->tx_tasklet);
982 }
983
984
985 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
986 static void init_ring(struct net_device *dev)
987 {
988 struct netdev_private *np = netdev_priv(dev);
989 int i;
990
991 np->cur_rx = np->cur_tx = 0;
992 np->dirty_rx = np->dirty_tx = 0;
993 np->cur_task = 0;
994
995 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
996
997 /* Initialize all Rx descriptors. */
998 for (i = 0; i < RX_RING_SIZE; i++) {
999 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1000 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1001 np->rx_ring[i].status = 0;
1002 np->rx_ring[i].frag[0].length = 0;
1003 np->rx_skbuff[i] = NULL;
1004 }
1005
1006 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1007 for (i = 0; i < RX_RING_SIZE; i++) {
1008 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1009 np->rx_skbuff[i] = skb;
1010 if (skb == NULL)
1011 break;
1012 skb->dev = dev; /* Mark as being used by this device. */
1013 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1014 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1015 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
1016 PCI_DMA_FROMDEVICE));
1017 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1018 }
1019 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1020
1021 for (i = 0; i < TX_RING_SIZE; i++) {
1022 np->tx_skbuff[i] = NULL;
1023 np->tx_ring[i].status = 0;
1024 }
1025 return;
1026 }
1027
1028 static void tx_poll (unsigned long data)
1029 {
1030 struct net_device *dev = (struct net_device *)data;
1031 struct netdev_private *np = netdev_priv(dev);
1032 unsigned head = np->cur_task % TX_RING_SIZE;
1033 struct netdev_desc *txdesc =
1034 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1035
1036 /* Chain the next pointer */
1037 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1038 int entry = np->cur_task % TX_RING_SIZE;
1039 txdesc = &np->tx_ring[entry];
1040 if (np->last_tx) {
1041 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1042 entry*sizeof(struct netdev_desc));
1043 }
1044 np->last_tx = txdesc;
1045 }
1046 /* Request an interrupt on the latest descriptor of the tx ring */
1047 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1048
1049 if (ioread32 (np->base + TxListPtr) == 0)
1050 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1051 np->base + TxListPtr);
1052 return;
1053 }
1054
1055 static netdev_tx_t
1056 start_tx (struct sk_buff *skb, struct net_device *dev)
1057 {
1058 struct netdev_private *np = netdev_priv(dev);
1059 struct netdev_desc *txdesc;
1060 unsigned entry;
1061
1062 /* Calculate the next Tx descriptor entry. */
1063 entry = np->cur_tx % TX_RING_SIZE;
1064 np->tx_skbuff[entry] = skb;
1065 txdesc = &np->tx_ring[entry];
1066
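/* The ring index shifted into bits 2 and up of the status word doubles as
   the software frame id that intr_handler() compares against the chip's
   TxFrameId. */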
1067 txdesc->next_desc = 0;
1068 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1069 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1070 skb->len,
1071 PCI_DMA_TODEVICE));
1072 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1073
1074 /* Increment cur_tx before tasklet_schedule() */
1075 np->cur_tx++;
1076 mb();
1077 /* Schedule a tx_poll() task */
1078 tasklet_schedule(&np->tx_tasklet);
1079
1080 /* On some architectures: explicitly flush cache lines here. */
1081 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1082 !netif_queue_stopped(dev)) {
1083 /* do nothing */
1084 } else {
1085 netif_stop_queue (dev);
1086 }
1087 dev->trans_start = jiffies;
1088 if (netif_msg_tx_queued(np)) {
1089 printk (KERN_DEBUG
1090 "%s: Transmit frame #%d queued in slot %d.\n",
1091 dev->name, np->cur_tx, entry);
1092 }
1093 return NETDEV_TX_OK;
1094 }
1095
1096 /* Reset the hardware Tx path and free all Tx buffers */
1097 static int
1098 reset_tx (struct net_device *dev)
1099 {
1100 struct netdev_private *np = netdev_priv(dev);
1101 void __iomem *ioaddr = np->base;
1102 struct sk_buff *skb;
1103 int i;
1104 int irq = in_interrupt();
1105
1106 /* Reset tx logic, TxListPtr will be cleaned */
1107 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1108 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1109
1110 /* free all tx skbuff */
1111 for (i = 0; i < TX_RING_SIZE; i++) {
1112 np->tx_ring[i].next_desc = 0;
1113
1114 skb = np->tx_skbuff[i];
1115 if (skb) {
1116 pci_unmap_single(np->pci_dev,
1117 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1118 skb->len, PCI_DMA_TODEVICE);
1119 if (irq)
1120 dev_kfree_skb_irq (skb);
1121 else
1122 dev_kfree_skb (skb);
1123 np->tx_skbuff[i] = NULL;
1124 dev->stats.tx_dropped++;
1125 }
1126 }
1127 np->cur_tx = np->dirty_tx = 0;
1128 np->cur_task = 0;
1129
1130 np->last_tx = NULL;
1131 iowrite8(127, ioaddr + TxDMAPollPeriod);
1132
1133 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1134 return 0;
1135 }
1136
1137 /* The interrupt handler cleans up after the Tx thread,
1138 and schedules Rx work via the rx_tasklet. */
1139 static irqreturn_t intr_handler(int irq, void *dev_instance)
1140 {
1141 struct net_device *dev = (struct net_device *)dev_instance;
1142 struct netdev_private *np = netdev_priv(dev);
1143 void __iomem *ioaddr = np->base;
1144 int hw_frame_id;
1145 int tx_cnt;
1146 int tx_status;
1147 int handled = 0;
1148 int i;
1149
1150
1151 do {
1152 int intr_status = ioread16(ioaddr + IntrStatus);
1153 iowrite16(intr_status, ioaddr + IntrStatus);
1154
1155 if (netif_msg_intr(np))
1156 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1157 dev->name, intr_status);
1158
1159 if (!(intr_status & DEFAULT_INTR))
1160 break;
1161
1162 handled = 1;
1163
1164 if (intr_status & (IntrRxDMADone)) {
1165 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1166 ioaddr + IntrEnable);
1167 if (np->budget < 0)
1168 np->budget = RX_BUDGET;
1169 tasklet_schedule(&np->rx_tasklet);
1170 }
1171 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1172 tx_status = ioread16 (ioaddr + TxStatus);
1173 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1174 if (netif_msg_tx_done(np))
1175 printk
1176 ("%s: Transmit status is %2.2x.\n",
1177 dev->name, tx_status);
1178 if (tx_status & 0x1e) {
1179 if (netif_msg_tx_err(np))
1180 printk("%s: Transmit error status %4.4x.\n",
1181 dev->name, tx_status);
1182 dev->stats.tx_errors++;
1183 if (tx_status & 0x10)
1184 dev->stats.tx_fifo_errors++;
1185 if (tx_status & 0x08)
1186 dev->stats.collisions++;
1187 if (tx_status & 0x04)
1188 dev->stats.tx_fifo_errors++;
1189 if (tx_status & 0x02)
1190 dev->stats.tx_window_errors++;
1191
1192 /*
1193 ** This reset has been verified on
1194 ** DFE-580TX boards ! phdm@macqel.be.
1195 */
1196 if (tx_status & 0x10) { /* TxUnderrun */
1197 /* Restart Tx FIFO and transmitter */
1198 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1199 /* No need to reset the Tx pointer here */
1200 }
1201 /* Restart the Tx. Need to make sure tx enabled */
1202 i = 10;
1203 do {
1204 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1205 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1206 break;
1207 mdelay(1);
1208 } while (--i);
1209 }
1210 /* Yup, this is a documentation bug. It cost me *hours*. */
1211 iowrite16 (0, ioaddr + TxStatus);
1212 if (tx_cnt < 0) {
1213 iowrite32(5000, ioaddr + DownCounter);
1214 break;
1215 }
1216 tx_status = ioread16 (ioaddr + TxStatus);
1217 }
1218 hw_frame_id = (tx_status >> 8) & 0xff;
1219 } else {
1220 hw_frame_id = ioread8(ioaddr + TxFrameId);
1221 }
1222
1223 if (np->pci_dev->revision >= 0x14) {
1224 spin_lock(&np->lock);
1225 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1226 int entry = np->dirty_tx % TX_RING_SIZE;
1227 struct sk_buff *skb;
1228 int sw_frame_id;
1229 sw_frame_id = (le32_to_cpu(
1230 np->tx_ring[entry].status) >> 2) & 0xff;
1231 if (sw_frame_id == hw_frame_id &&
1232 !(le32_to_cpu(np->tx_ring[entry].status)
1233 & 0x00010000))
1234 break;
1235 if (sw_frame_id == (hw_frame_id + 1) %
1236 TX_RING_SIZE)
1237 break;
1238 skb = np->tx_skbuff[entry];
1239 /* Free the original skb. */
1240 pci_unmap_single(np->pci_dev,
1241 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1242 skb->len, PCI_DMA_TODEVICE);
1243 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1244 np->tx_skbuff[entry] = NULL;
1245 np->tx_ring[entry].frag[0].addr = 0;
1246 np->tx_ring[entry].frag[0].length = 0;
1247 }
1248 spin_unlock(&np->lock);
1249 } else {
1250 spin_lock(&np->lock);
1251 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1252 int entry = np->dirty_tx % TX_RING_SIZE;
1253 struct sk_buff *skb;
1254 if (!(le32_to_cpu(np->tx_ring[entry].status)
1255 & 0x00010000))
1256 break;
1257 skb = np->tx_skbuff[entry];
1258 /* Free the original skb. */
1259 pci_unmap_single(np->pci_dev,
1260 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1261 skb->len, PCI_DMA_TODEVICE);
1262 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1263 np->tx_skbuff[entry] = NULL;
1264 np->tx_ring[entry].frag[0].addr = 0;
1265 np->tx_ring[entry].frag[0].length = 0;
1266 }
1267 spin_unlock(&np->lock);
1268 }
1269
1270 if (netif_queue_stopped(dev) &&
1271 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1272 /* The ring is no longer full, clear busy flag. */
1273 netif_wake_queue (dev);
1274 }
1275 /* Abnormal error summary/uncommon events handlers. */
1276 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1277 netdev_error(dev, intr_status);
1278 } while (0);
1279 if (netif_msg_intr(np))
1280 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1281 dev->name, ioread16(ioaddr + IntrStatus));
1282 return IRQ_RETVAL(handled);
1283 }
1284
1285 static void rx_poll(unsigned long data)
1286 {
1287 struct net_device *dev = (struct net_device *)data;
1288 struct netdev_private *np = netdev_priv(dev);
1289 int entry = np->cur_rx % RX_RING_SIZE;
1290 int boguscnt = np->budget;
1291 void __iomem *ioaddr = np->base;
1292 int received = 0;
1293
1294 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1295 while (1) {
1296 struct netdev_desc *desc = &(np->rx_ring[entry]);
1297 u32 frame_status = le32_to_cpu(desc->status);
1298 int pkt_len;
1299
1300 if (--boguscnt < 0) {
1301 goto not_done;
1302 }
1303 if (!(frame_status & DescOwn))
1304 break;
1305 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1306 if (netif_msg_rx_status(np))
1307 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1308 frame_status);
1309 if (frame_status & 0x001f4000) {
1310 /* There was an error. */
1311 if (netif_msg_rx_err(np))
1312 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1313 frame_status);
1314 dev->stats.rx_errors++;
1315 if (frame_status & 0x00100000)
1316 dev->stats.rx_length_errors++;
1317 if (frame_status & 0x00010000)
1318 dev->stats.rx_fifo_errors++;
1319 if (frame_status & 0x00060000)
1320 dev->stats.rx_frame_errors++;
1321 if (frame_status & 0x00080000)
1322 dev->stats.rx_crc_errors++;
1323 if (frame_status & 0x00100000) {
1324 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1325 " status %8.8x.\n",
1326 dev->name, frame_status);
1327 }
1328 } else {
1329 struct sk_buff *skb;
1330 #ifndef final_version
1331 if (netif_msg_rx_status(np))
1332 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1333 ", bogus_cnt %d.\n",
1334 pkt_len, boguscnt);
1335 #endif
1336 /* Check if the packet is long enough to accept without copying
1337 to a minimally-sized skbuff. */
1338 if (pkt_len < rx_copybreak &&
1339 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1340 skb_reserve(skb, 2); /* 16 byte align the IP header */
1341 pci_dma_sync_single_for_cpu(np->pci_dev,
1342 le32_to_cpu(desc->frag[0].addr),
1343 np->rx_buf_sz,
1344 PCI_DMA_FROMDEVICE);
1345
1346 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1347 pci_dma_sync_single_for_device(np->pci_dev,
1348 le32_to_cpu(desc->frag[0].addr),
1349 np->rx_buf_sz,
1350 PCI_DMA_FROMDEVICE);
1351 skb_put(skb, pkt_len);
1352 } else {
1353 pci_unmap_single(np->pci_dev,
1354 le32_to_cpu(desc->frag[0].addr),
1355 np->rx_buf_sz,
1356 PCI_DMA_FROMDEVICE);
1357 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1358 np->rx_skbuff[entry] = NULL;
1359 }
1360 skb->protocol = eth_type_trans(skb, dev);
1361 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1362 netif_rx(skb);
1363 }
1364 entry = (entry + 1) % RX_RING_SIZE;
1365 received++;
1366 }
1367 np->cur_rx = entry;
1368 refill_rx (dev);
1369 np->budget -= received;
1370 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1371 return;
1372
1373 not_done:
1374 np->cur_rx = entry;
1375 refill_rx (dev);
1376 if (!received)
1377 received = 1;
1378 np->budget -= received;
1379 if (np->budget <= 0)
1380 np->budget = RX_BUDGET;
1381 tasklet_schedule(&np->rx_tasklet);
1382 return;
1383 }
1384
1385 static void refill_rx (struct net_device *dev)
1386 {
1387 struct netdev_private *np = netdev_priv(dev);
1388 int entry;
1389 int cnt = 0;
1390
1391 /* Refill the Rx ring buffers. */
1392 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1393 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1394 struct sk_buff *skb;
1395 entry = np->dirty_rx % RX_RING_SIZE;
1396 if (np->rx_skbuff[entry] == NULL) {
1397 skb = dev_alloc_skb(np->rx_buf_sz);
1398 np->rx_skbuff[entry] = skb;
1399 if (skb == NULL)
1400 break; /* Better luck next round. */
1401 skb->dev = dev; /* Mark as being used by this device. */
1402 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1403 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1404 pci_map_single(np->pci_dev, skb->data,
1405 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1406 }
1407 /* Perhaps we need not reset this field. */
1408 np->rx_ring[entry].frag[0].length =
1409 cpu_to_le32(np->rx_buf_sz | LastFrag);
1410 np->rx_ring[entry].status = 0;
1411 cnt++;
1412 }
1413 return;
1414 }
1415 static void netdev_error(struct net_device *dev, int intr_status)
1416 {
1417 struct netdev_private *np = netdev_priv(dev);
1418 void __iomem *ioaddr = np->base;
1419 u16 mii_ctl, mii_advertise, mii_lpa;
1420 int speed;
1421
1422 if (intr_status & LinkChange) {
1423 if (mdio_wait_link(dev, 10) == 0) {
1424 printk(KERN_INFO "%s: Link up\n", dev->name);
1425 if (np->an_enable) {
1426 mii_advertise = mdio_read(dev, np->phys[0],
1427 MII_ADVERTISE);
1428 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1429 mii_advertise &= mii_lpa;
1430 printk(KERN_INFO "%s: Link changed: ",
1431 dev->name);
1432 if (mii_advertise & ADVERTISE_100FULL) {
1433 np->speed = 100;
1434 printk("100Mbps, full duplex\n");
1435 } else if (mii_advertise & ADVERTISE_100HALF) {
1436 np->speed = 100;
1437 printk("100Mbps, half duplex\n");
1438 } else if (mii_advertise & ADVERTISE_10FULL) {
1439 np->speed = 10;
1440 printk("10Mbps, full duplex\n");
1441 } else if (mii_advertise & ADVERTISE_10HALF) {
1442 np->speed = 10;
1443 printk("10Mbps, half duplex\n");
1444 } else
1445 printk("\n");
1446
1447 } else {
1448 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1449 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1450 np->speed = speed;
1451 printk(KERN_INFO "%s: Link changed: %dMbps, ",
1452 dev->name, speed);
1453 printk("%s duplex.\n",
1454 (mii_ctl & BMCR_FULLDPLX) ?
1455 "full" : "half");
1456 }
1457 check_duplex(dev);
1458 if (np->flowctrl && np->mii_if.full_duplex) {
1459 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1460 ioaddr + MulticastFilter1+2);
1461 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1462 ioaddr + MACCtrl0);
1463 }
1464 netif_carrier_on(dev);
1465 } else {
1466 printk(KERN_INFO "%s: Link down\n", dev->name);
1467 netif_carrier_off(dev);
1468 }
1469 }
1470 if (intr_status & StatsMax) {
1471 get_stats(dev);
1472 }
1473 if (intr_status & IntrPCIErr) {
1474 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1475 dev->name, intr_status);
1476 /* We must do a global reset of DMA to continue. */
1477 }
1478 }
1479
1480 static struct net_device_stats *get_stats(struct net_device *dev)
1481 {
1482 struct netdev_private *np = netdev_priv(dev);
1483 void __iomem *ioaddr = np->base;
1484 int i;
1485
1486 /* We should lock this segment of code for SMP eventually, although
1487 the vulnerability window is very small and statistics are
1488 non-critical. */
1489 /* The chip only needs to report frames it silently dropped. */
1490 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1491 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1492 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1493 dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
1494 dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1495 dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
1496 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
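/* Read (and presumably clear) the remaining on-chip counters that the driver
   does not accumulate. */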
1497 ioread8(ioaddr + StatsTxDefer);
1498 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1499 ioread8(ioaddr + i);
1500 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1501 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1502 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1503 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1504
1505 return &dev->stats;
1506 }
1507
1508 static void set_rx_mode(struct net_device *dev)
1509 {
1510 struct netdev_private *np = netdev_priv(dev);
1511 void __iomem *ioaddr = np->base;
1512 u16 mc_filter[4]; /* Multicast hash filter */
1513 u32 rx_mode;
1514 int i;
1515
1516 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1517 memset(mc_filter, 0xff, sizeof(mc_filter));
1518 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1519 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1520 (dev->flags & IFF_ALLMULTI)) {
1521 /* Too many to match, or accept all multicasts. */
1522 memset(mc_filter, 0xff, sizeof(mc_filter));
1523 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1524 } else if (!netdev_mc_empty(dev)) {
1525 struct dev_mc_list *mclist;
1526 int bit;
1527 int index;
1528 int crc;
1529 memset (mc_filter, 0, sizeof (mc_filter));
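/* Hash each address with a little-endian CRC-32; the top six CRC bits pick
   one of the 64 filter bits spread across the four 16-bit MulticastFilter
   registers. */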
1530 netdev_for_each_mc_addr(mclist, dev) {
1531 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1532 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1533 if (crc & 0x80000000) index |= 1 << bit;
1534 mc_filter[index/16] |= (1 << (index % 16));
1535 }
1536 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1537 } else {
1538 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1539 return;
1540 }
1541 if (np->mii_if.full_duplex && np->flowctrl)
1542 mc_filter[3] |= 0x0200;
1543
1544 for (i = 0; i < 4; i++)
1545 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1546 iowrite8(rx_mode, ioaddr + RxMode);
1547 }
1548
1549 static int __set_mac_addr(struct net_device *dev)
1550 {
1551 struct netdev_private *np = netdev_priv(dev);
1552 u16 addr16;
1553
1554 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1555 iowrite16(addr16, np->base + StationAddr);
1556 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1557 iowrite16(addr16, np->base + StationAddr+2);
1558 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1559 iowrite16(addr16, np->base + StationAddr+4);
1560 return 0;
1561 }
1562
1563 static int check_if_running(struct net_device *dev)
1564 {
1565 if (!netif_running(dev))
1566 return -EINVAL;
1567 return 0;
1568 }
1569
1570 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1571 {
1572 struct netdev_private *np = netdev_priv(dev);
1573 strcpy(info->driver, DRV_NAME);
1574 strcpy(info->version, DRV_VERSION);
1575 strcpy(info->bus_info, pci_name(np->pci_dev));
1576 }
1577
1578 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1579 {
1580 struct netdev_private *np = netdev_priv(dev);
1581 spin_lock_irq(&np->lock);
1582 mii_ethtool_gset(&np->mii_if, ecmd);
1583 spin_unlock_irq(&np->lock);
1584 return 0;
1585 }
1586
1587 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1588 {
1589 struct netdev_private *np = netdev_priv(dev);
1590 int res;
1591 spin_lock_irq(&np->lock);
1592 res = mii_ethtool_sset(&np->mii_if, ecmd);
1593 spin_unlock_irq(&np->lock);
1594 return res;
1595 }
1596
1597 static int nway_reset(struct net_device *dev)
1598 {
1599 struct netdev_private *np = netdev_priv(dev);
1600 return mii_nway_restart(&np->mii_if);
1601 }
1602
1603 static u32 get_link(struct net_device *dev)
1604 {
1605 struct netdev_private *np = netdev_priv(dev);
1606 return mii_link_ok(&np->mii_if);
1607 }
1608
1609 static u32 get_msglevel(struct net_device *dev)
1610 {
1611 struct netdev_private *np = netdev_priv(dev);
1612 return np->msg_enable;
1613 }
1614
1615 static void set_msglevel(struct net_device *dev, u32 val)
1616 {
1617 struct netdev_private *np = netdev_priv(dev);
1618 np->msg_enable = val;
1619 }
1620
1621 static const struct ethtool_ops ethtool_ops = {
1622 .begin = check_if_running,
1623 .get_drvinfo = get_drvinfo,
1624 .get_settings = get_settings,
1625 .set_settings = set_settings,
1626 .nway_reset = nway_reset,
1627 .get_link = get_link,
1628 .get_msglevel = get_msglevel,
1629 .set_msglevel = set_msglevel,
1630 };
1631
1632 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1633 {
1634 struct netdev_private *np = netdev_priv(dev);
1635 int rc;
1636
1637 if (!netif_running(dev))
1638 return -EINVAL;
1639
1640 spin_lock_irq(&np->lock);
1641 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1642 spin_unlock_irq(&np->lock);
1643
1644 return rc;
1645 }
1646
1647 static int netdev_close(struct net_device *dev)
1648 {
1649 struct netdev_private *np = netdev_priv(dev);
1650 void __iomem *ioaddr = np->base;
1651 struct sk_buff *skb;
1652 int i;
1653
1654 /* Wait and kill tasklet */
1655 tasklet_kill(&np->rx_tasklet);
1656 tasklet_kill(&np->tx_tasklet);
1657 np->cur_tx = 0;
1658 np->dirty_tx = 0;
1659 np->cur_task = 0;
1660 np->last_tx = NULL;
1661
1662 netif_stop_queue(dev);
1663
1664 if (netif_msg_ifdown(np)) {
1665 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1666 "Rx %4.4x Int %2.2x.\n",
1667 dev->name, ioread8(ioaddr + TxStatus),
1668 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1669 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1670 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1671 }
1672
1673 /* Disable interrupts by clearing the interrupt mask. */
1674 iowrite16(0x0000, ioaddr + IntrEnable);
1675
1676 /* Disable Rx and Tx DMA so resources can be released safely */
1677 iowrite32(0x500, ioaddr + DMACtrl);
1678
1679 /* Stop the chip's Tx and Rx processes. */
1680 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1681
1682 for (i = 2000; i > 0; i--) {
1683 if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1684 break;
1685 mdelay(1);
1686 }
1687
1688 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1689 ioaddr +ASICCtrl + 2);
1690
1691 for (i = 2000; i > 0; i--) {
1692 if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
1693 break;
1694 mdelay(1);
1695 }
1696
1697 #ifdef __i386__
1698 if (netif_msg_hw(np)) {
1699 printk(KERN_DEBUG " Tx ring at %8.8x:\n",
1700 (int)(np->tx_ring_dma));
1701 for (i = 0; i < TX_RING_SIZE; i++)
1702 printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1703 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1704 np->tx_ring[i].frag[0].length);
1705 printk(KERN_DEBUG " Rx ring %8.8x:\n",
1706 (int)(np->rx_ring_dma));
1707 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1708 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1709 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1710 np->rx_ring[i].frag[0].length);
1711 }
1712 }
1713 #endif /* __i386__ debugging only */
1714
1715 free_irq(dev->irq, dev);
1716
1717 del_timer_sync(&np->timer);
1718
1719 /* Free all the skbuffs in the Rx queue. */
1720 for (i = 0; i < RX_RING_SIZE; i++) {
1721 np->rx_ring[i].status = 0;
1722 skb = np->rx_skbuff[i];
1723 if (skb) {
1724 pci_unmap_single(np->pci_dev,
1725 le32_to_cpu(np->rx_ring[i].frag[0].addr),
1726 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1727 dev_kfree_skb(skb);
1728 np->rx_skbuff[i] = NULL;
1729 }
1730 np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1731 }
1732 for (i = 0; i < TX_RING_SIZE; i++) {
1733 np->tx_ring[i].next_desc = 0;
1734 skb = np->tx_skbuff[i];
1735 if (skb) {
1736 pci_unmap_single(np->pci_dev,
1737 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1738 skb->len, PCI_DMA_TODEVICE);
1739 dev_kfree_skb(skb);
1740 np->tx_skbuff[i] = NULL;
1741 }
1742 }
1743
1744 return 0;
1745 }
1746
1747 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1748 {
1749 struct net_device *dev = pci_get_drvdata(pdev);
1750
1751 if (dev) {
1752 struct netdev_private *np = netdev_priv(dev);
1753
1754 unregister_netdev(dev);
1755 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1756 np->rx_ring_dma);
1757 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1758 np->tx_ring_dma);
1759 pci_iounmap(pdev, np->base);
1760 pci_release_regions(pdev);
1761 free_netdev(dev);
1762 pci_set_drvdata(pdev, NULL);
1763 }
1764 }
1765
1766 static struct pci_driver sundance_driver = {
1767 .name = DRV_NAME,
1768 .id_table = sundance_pci_tbl,
1769 .probe = sundance_probe1,
1770 .remove = __devexit_p(sundance_remove1),
1771 };
1772
1773 static int __init sundance_init(void)
1774 {
1775 /* when a module, this is printed whether or not devices are found in probe */
1776 #ifdef MODULE
1777 printk(version);
1778 #endif
1779 return pci_register_driver(&sundance_driver);
1780 }
1781
1782 static void __exit sundance_exit(void)
1783 {
1784 pci_unregister_driver(&sundance_driver);
1785 }
1786
1787 module_init(sundance_init);
1788 module_exit(sundance_exit);
1789
1790