/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.4.1"
#define DRV_RELDATE	"July-24-2006"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#ifdef CONFIG_VIA_RHINE_NAPI
#define RX_RING_SIZE	64
#else
#define RX_RING_SIZE	16
#endif


/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#else
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A (Rhine-I) PCI Fast Ethernet
controller; it also supports the later Rhine-II and Rhine-III chips.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.
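
For illustration, the copybreak decision in rhine_rx() below boils down to
this simplified sketch (error handling and DMA syncing omitted):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	// 16-byte align the IP header
		// copy the frame; the full-sized ring buffer stays in place
	} else {
		// hand the full-sized ring skbuff itself up the stack
	}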

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->priv->lock spinlock. The other thread is the interrupt handler, which
is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
is not available it stops the transmit queue by calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Rx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not reliable information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
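
/*
 * PCI writes can sit in bridge posting buffers; reading back any chip
 * register (StationAddr here) forces them to complete. Typical use, as
 * in rhine_chip_reset() below:
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;		// reset command is now guaranteed to be at the chip
 */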

static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);


/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	s32 rx_status;
	u32 desc_length;	/* Chain flag, Buffer/frame length */
	u32 addr;
	u32 next_desc;
};
struct tx_desc {
	s32 tx_status;
	u32 desc_length;	/* Chain flag, Tx Config, Frame length */
	u32 addr;
	u32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device_stats stats;
	spinlock_t lock;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);

#define RHINE_WAIT_FOR(condition) do {					\
	int i=1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
		       DRV_NAME, 1024-i, __func__, __LINE__);		\
} while(0)
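
/*
 * RHINE_WAIT_FOR() busy-polls its condition for at most 1024 iterations
 * and, at debug > 1, reports when more than half of that budget was
 * consumed. For example, waiting for a soft reset to complete:
 *
 *	RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
 */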

static inline u32 get_intr_status(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
			       DRV_NAME, reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
		       "Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
		       (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
		       "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev, NULL);
	enable_irq(dev->irq);
}
#endif

#ifdef CONFIG_VIA_RHINE_NAPI
static int rhine_napipoll(struct net_device *dev, int *budget)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int done, limit = min(dev->quota, *budget);

	done = rhine_rx(dev, limit);
	*budget -= done;
	dev->quota -= done;

	if (done < limit) {
		netif_rx_complete(dev);

		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
		return 0;
	}
	else
		return 1;
}
#endif
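
/*
 * Old-style NAPI contract, as used above: rhine_napipoll() returns 0 once
 * all pending receive work fit into the budget (after re-arming interrupts
 * with the iowrite16() above), and 1 to remain scheduled on the poll list.
 */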

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}

static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u8 pci_rev;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pci_rev < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pci_rev >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pci_rev < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pci_rev >= VT6105_B0)
				quirks |= rq6patterns;
			if (pci_rev < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
#ifdef CONFIG_VIA_RHINE_NAPI
	dev->poll = rhine_napipoll;
	dev->weight = 64;
#endif
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr
#else
	       (long)ioaddr
#endif
	       );

	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),
					    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}
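
/*
 * Layout of the single coherent allocation made in alloc_ring() above:
 *
 *	ring / ring_dma
 *	|<- RX_RING_SIZE * sizeof(struct rx_desc) ->|<- TX_RING_SIZE * sizeof(struct tx_desc) ->|
 *	 rx_ring                                     tx_ring (= ring + Rx descriptor area)
 */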

static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;

}

static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	/* Mark the last entry as wrapping the ring. */
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

}

static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
		       rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	}
	else	/* Let MII library update carrier status */
		rhine_check_media(mii->dev, 0);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n",
		       mii->dev->name, mii->force_media,
		       netif_carrier_ok(mii->dev));
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	netif_poll_enable(dev);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		  IntrTxDone | IntrTxError | IntrTxUnderrun |
		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
		  ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
			 dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}

static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
}

static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}
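
/*
 * A note on the two wmb() calls above: the first orders the descriptor
 * field writes before the DescOwn hand-off to the chip; the second makes
 * the DescOwn write visible before the Cmd1TxDemand doorbell write.
 */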

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			iowrite8(0x08, ioaddr + IntrStatus2);
		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
			       dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
#ifdef CONFIG_VIA_RHINE_NAPI
			iowrite16(IntrTxAborted |
				  IntrTxDone | IntrTxError | IntrTxUnderrun |
				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
				  ioaddr + IntrEnable);

			netif_rx_schedule(dev);
#else
			rhine_rx(dev, RX_RING_SIZE);
#endif
		}

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					printk(KERN_WARNING "%s: "
					       "rhine_interrupt() Tx engine "
					       "still on.\n", dev->name);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=%#8.8x.\n",
			       dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
		       dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}

/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		if (debug > 4)
			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
			       desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				rp->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG "rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				rp->stats.rx_errors++;
				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					rp->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx "
					       "descriptor chain.\n",
					       dev->name);
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_VIA_RHINE_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
			dev->last_rx = jiffies;
			rp->stats.rx_bytes += pkt_len;
			rp->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}

/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		if (debug > 1)
			printk(KERN_WARNING "%s: rhine_restart_tx() "
			       "Another error occurred %8.8x.\n",
			       dev->name, intr_status);
	}

}

static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
			       dev->name, intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		if (rp->tx_thresh < 0xE0)
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		if (debug > 1)
			printk(KERN_INFO "%s: Transmitter underrun, Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
			       dev->name);
	}
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		if (rp->tx_thresh < 0xE0) {
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		}
		if (debug > 1)
			printk(KERN_INFO "%s: Unspecified error. Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			printk(KERN_ERR "%s: Something Wicked happened! "
			       "%8.8x.\n", dev->name, intr_status);
	}

	spin_unlock(&rp->lock);
}

static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &rp->stats;
}

static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
		       dev->name);
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}
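
/*
 * The multicast hash above: the top 6 bits of the Ethernet CRC-32 of each
 * address pick one of 64 filter bins, split across two 32-bit registers:
 *
 *	bit_nr = ether_crc(ETH_ALEN, addr) >> 26;	// bin 0..63
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);	// word select + bit set
 */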

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct rhine_private *rp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(rp->pdev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}

static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock_irq(&rp->lock);

	netif_stop_queue(dev);
	netif_poll_disable(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, "
		       "status was %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);

	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}


static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + 0xA7);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */

}

#ifdef CONFIG_PM
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}

static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
		       dev->name, ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver rhine_driver = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one,
	.remove		= __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend	= rhine_suspend,
	.resume		= rhine_resume,
#endif /* CONFIG_PM */
	.shutdown	= rhine_shutdown,
};


static int __init rhine_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&rhine_driver);
}


static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}


module_init(rhine_init);
module_exit(rhine_cleanup);