1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2 /*
3 Written 1998-2001 by Donald Becker.
4
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
17
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
21 Annapolis MD 21403
22
23
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
28
29
30 Linux kernel version history:
31
32 LK1.1.0:
33 - Jeff Garzik: softnet 'n stuff
34
35 LK1.1.1:
36 - Justin Guyett: softnet and locking fixes
37 - Jeff Garzik: use PCI interface
38
39 LK1.1.2:
40 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
41
42 LK1.1.3:
43 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
44 code) update "Theory of Operation" with
45 softnet/locking changes
46 - Dave Miller: PCI DMA and endian fixups
47 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
48
49 LK1.1.4:
50 - Urban Widmark: fix gcc 2.95.2 problem and
51 remove writel's to fixed address 0x7c
52
53 LK1.1.5:
54 - Urban Widmark: mdio locking, bounce buffer changes
55 merges from Becker's 1.05 version
56 added netif_running_on/off support
57
58 LK1.1.6:
59 - Urban Widmark: merges from Becker's 1.08b version (VT6102 + mdio)
60 set netif_running_on/off on startup, del_timer_sync
61
62 LK1.1.7:
63 - Manfred Spraul: added reset into tx_timeout
64
65 LK1.1.9:
66 - Urban Widmark: merges from Becker's 1.10 version
67 (media selection + eeprom reload)
68 - David Vrabel: merges from D-Link "1.11" version
69 (disable WOL and PME on startup)
70
71 LK1.1.10:
72 - Manfred Spraul: use "singlecopy" for unaligned buffers
73 don't allocate bounce buffers for !ReqTxAlign cards
74
75 LK1.1.11:
76 - David Woodhouse: Set dev->base_addr before the first time we call
77 wait_for_reset(). It's a lot happier that way.
78 Free np->tx_bufs only if we actually allocated it.
79
80 LK1.1.12:
81 - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
82
83 LK1.1.13 (jgarzik):
84 - Add ethtool support
85 - Replace some MII-related magic numbers with constants
86
87 LK1.1.14 (Ivan G.):
88 - fixes comments for Rhine-III
89 - removes W_MAX_TIMEOUT (unused)
90 - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
91 is R-I and has Davicom chip, flag is referenced in kernel driver)
92 - sends chip_id as a parameter to wait_for_reset since np is not
93 initialized on first call
94 - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
95 for Rhine-III's (documentation says same bit is correct)
96 - transmit frame queue message is off by one - fixed
97 - adds IntrNormalSummary to "Something Wicked" exclusion list
98 so normal interrupts will not trigger the message (src: Donald Becker)
99 (Roger Luethi)
100 - show confused chip where to continue after Tx error
101 - location of collision counter is chip specific
102 - allow selecting backoff algorithm (module parameter)
103
104 LK1.1.15 (jgarzik):
105 - Use new MII lib helper generic_mii_ioctl
106
107 LK1.1.16 (Roger Luethi)
108 - Etherleak fix
109 - Handle Tx buffer underrun
110 - Fix bugs in full duplex handling
111 - New reset code uses "force reset" cmd on Rhine-II
112 - Various clean ups
113
114 LK1.1.17 (Roger Luethi)
115 - Fix race in via_rhine_start_tx()
116 - On errors, wait for Tx engine to turn off before scavenging
117 - Handle Tx descriptor write-back race on Rhine-II
118 - Force flushing for PCI posted writes
119 - More reset code changes
120
121 LK1.1.18 (Roger Luethi)
122 - No filtering multicast in promisc mode (Edward Peng)
123 - Fix for Rhine-I Tx timeouts
124
125 LK1.1.19 (Roger Luethi)
126 - Increase Tx threshold for unspecified errors
127
128 LK1.2.0-2.6 (Roger Luethi)
129 - Massive clean-up
130 - Rewrite PHY, media handling (remove options, full_duplex, backoff)
131 - Fix Tx engine race for good
132 - Craig Brind: Zero padded aligned buffers for short packets.
133
134 */
135
136 #define DRV_NAME "via-rhine"
137 #define DRV_VERSION "1.2.0-2.6"
138 #define DRV_RELDATE "June-10-2004"
139
140
141 /* A few user-configurable values.
142 These may be modified when a driver module is loaded. */
143
144 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
145 static int max_interrupt_work = 20;
146
147 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
148 Setting to > 1518 effectively disables this feature. */
149 static int rx_copybreak;
150
151 /*
152 * In case you are looking for 'options[]' or 'full_duplex[]', they
153 * are gone. Use ethtool(8) instead.
154 */
155
156 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
157 The Rhine has a 64-element 8390-like hash table. */
158 static const int multicast_filter_limit = 32;
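/* For illustration: rhine_set_rx_mode() below hashes each multicast address
   with the Ethernet CRC and uses the top six bits of the result to pick one
   of the 64 filter bits, split across the two 32-bit MulticastFilter
   registers. */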
159
160
161 /* Operational parameters that are set at compile time. */
162
163 /* Keep the ring sizes a power of two for compile efficiency.
164 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
165 Making the Tx ring too large decreases the effectiveness of channel
166 bonding and packet priority.
167 There are no ill effects from too-large receive rings. */
168 #define TX_RING_SIZE 16
169 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
170 #define RX_RING_SIZE 16
171
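/*
 * Minimal sketch (not used by the driver; the helper name is made up for
 * illustration): because TX_RING_SIZE is a power of two, the index wrap
 * used below in rhine_start_tx(), "cur_tx % TX_RING_SIZE", compiles down
 * to a plain bit mask.
 */
static inline unsigned int rhine_example_ring_index(unsigned int cur_tx)
{
	return cur_tx % TX_RING_SIZE;	/* same as cur_tx & (TX_RING_SIZE - 1) */
}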
172
173 /* Operational parameters that usually are not changed. */
174
175 /* Time in jiffies before concluding the transmitter is hung. */
176 #define TX_TIMEOUT (2*HZ)
177
178 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
179
180 #include <linux/module.h>
181 #include <linux/moduleparam.h>
182 #include <linux/kernel.h>
183 #include <linux/string.h>
184 #include <linux/timer.h>
185 #include <linux/errno.h>
186 #include <linux/ioport.h>
187 #include <linux/slab.h>
188 #include <linux/interrupt.h>
189 #include <linux/pci.h>
190 #include <linux/dma-mapping.h>
191 #include <linux/netdevice.h>
192 #include <linux/etherdevice.h>
193 #include <linux/skbuff.h>
194 #include <linux/init.h>
195 #include <linux/delay.h>
196 #include <linux/mii.h>
197 #include <linux/ethtool.h>
198 #include <linux/crc32.h>
199 #include <linux/bitops.h>
200 #include <asm/processor.h> /* Processor type for cache alignment. */
201 #include <asm/io.h>
202 #include <asm/irq.h>
203 #include <asm/uaccess.h>
204
205 /* These identify the driver base version and may not be removed. */
206 static char version[] __devinitdata =
207 KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
208
209 /* This driver was written to use PCI memory space. Some early versions
210 of the Rhine may only work correctly with I/O space accesses. */
211 #ifdef CONFIG_VIA_RHINE_MMIO
212 #define USE_MMIO
214 #endif
215
216 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
217 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
218 MODULE_LICENSE("GPL");
219
220 module_param(max_interrupt_work, int, 0);
221 module_param(debug, int, 0);
222 module_param(rx_copybreak, int, 0);
223 MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
224 MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
225 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
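/*
 * Example usage (illustrative values):
 *	modprobe via-rhine debug=3 rx_copybreak=200 max_interrupt_work=32
 * Unset parameters keep the defaults given above.
 */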
226
227 /*
228 Theory of Operation
229
230 I. Board Compatibility
231
232 This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
233 controller. It also supports the Rhine-II (VT6102) and Rhine-III (VT6105) chips.
234
235 II. Board-specific settings
236
237 Boards with this chip are functional only in a bus-master PCI slot.
238
239 Many operational settings are loaded from the EEPROM to the Config word at
240 offset 0x78. For most of these settings, this driver assumes that they are
241 correct.
242 If this driver is compiled to use PCI memory space operations the EEPROM
243 must be configured to enable memory ops.
244
245 III. Driver operation
246
247 IIIa. Ring buffers
248
249 This driver uses two fixed-size descriptor lists, allocated at open() time and
250 formed into rings by a branch from the final descriptor to the beginning of
251 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
252
253 IIIb/c. Transmit/Receive Structure
254
255 This driver attempts to use a zero-copy receive and transmit scheme.
256
257 Alas, all data buffers are required to start on a 32 bit boundary, so
258 the driver must often copy transmit packets into bounce buffers.
259
260 The driver allocates full frame size skbuffs for the Rx ring buffers at
261 open() time and passes the skb->data field to the chip as receive data
262 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
263 a fresh skbuff is allocated and the frame is copied to the new skbuff.
264 When the incoming frame is larger, the skbuff is passed directly up the
265 protocol stack. Buffers consumed this way are replaced by newly allocated
266 skbuffs in the last phase of rhine_rx().
267
268 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
269 using a full-sized skbuff for small frames vs. the copying costs of larger
270 frames. New boards are typically used in generously configured machines
271 and the underfilled buffers have negligible impact compared to the benefit of
272 a single allocation size, so the default value of zero results in never
273 copying packets. When copying is done, the cost is usually mitigated by using
274 a combined copy/checksum routine. Copying also preloads the cache, which is
275 most useful with small frames.
276
277 Since the VIA chips are only able to transfer data to buffers on 32 bit
278 boundaries, the IP header at offset 14 in an ethernet frame isn't
279 longword aligned for further processing. Copying these unaligned buffers
280 has the beneficial effect of 16-byte aligning the IP header.
281
282 IIId. Synchronization
283
284 The driver runs as two independent, single-threaded flows of control. One
285 is the send-packet routine, which enforces single-threaded use by the
286 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler, which
287 is single threaded by the hardware and interrupt handling software.
288
289 The send packet thread has partial control over the Tx ring. It locks the
290 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in the ring
291 is not available it stops the transmit queue by calling netif_stop_queue.
292
293 The interrupt handler has exclusive control over the Rx ring and records stats
294 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
295 empty by incrementing the dirty_tx mark. If at least half of the entries in
296 the Tx ring are available, the transmit queue is woken up if it was stopped.
297
298 IV. Notes
299
300 IVb. References
301
302 Preliminary VT86C100A manual from http://www.via.com.tw/
303 http://www.scyld.com/expert/100mbps.html
304 http://www.scyld.com/expert/NWay.html
305 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
306 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
307
308
309 IVc. Errata
310
311 The VT86C100A manual is not a reliable source of information.
312 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
313 in significant performance degradation for bounce buffer copies on transmit
314 and unaligned IP headers on receive.
315 The chip does not pad to minimum transmit length.
316
317 */
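/*
 * Illustrative sketch of the copybreak scheme described in IIIb/c above.
 * This helper is not part of the driver and its name is invented for the
 * example: frames shorter than rx_copybreak are copied into a freshly
 * allocated skb so the ring buffer can be reused; larger frames (or a
 * failed allocation) are handed up as-is. The real code, including the
 * PCI DMA syncing, lives in rhine_rx() below.
 */
static inline struct sk_buff *rhine_example_copybreak(struct sk_buff *ring_skb,
						      int pkt_len)
{
	struct sk_buff *copy;

	if (pkt_len >= rx_copybreak)
		return NULL;			/* pass ring_skb up directly */

	copy = dev_alloc_skb(pkt_len + 2);
	if (copy == NULL)
		return NULL;			/* fall back to passing ring_skb up */
	skb_reserve(copy, 2);			/* 16-byte align the IP header */
	memcpy(skb_put(copy, pkt_len), ring_skb->data, pkt_len);
	return copy;				/* ring_skb stays in the Rx ring */
}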
318
319
320 /* This table drives the PCI probe routines. It's mostly boilerplate in all
321 of the drivers, and will likely be provided by some future kernel.
322 Each entry below matches one supported Rhine chip by its PCI vendor/device
323 ID; the subsystem IDs are wildcarded with PCI_ANY_ID.
324 */
325
326 enum rhine_revs {
327 VT86C100A = 0x00,
328 VTunknown0 = 0x20,
329 VT6102 = 0x40,
330 VT8231 = 0x50, /* Integrated MAC */
331 VT8233 = 0x60, /* Integrated MAC */
332 VT8235 = 0x74, /* Integrated MAC */
333 VT8237 = 0x78, /* Integrated MAC */
334 VTunknown1 = 0x7C,
335 VT6105 = 0x80,
336 VT6105_B0 = 0x83,
337 VT6105L = 0x8A,
338 VT6107 = 0x8C,
339 VTunknown2 = 0x8E,
340 VT6105M = 0x90, /* Management adapter */
341 };
342
343 enum rhine_quirks {
344 rqWOL = 0x0001, /* Wake-On-LAN support */
345 rqForceReset = 0x0002,
346 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
347 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
348 rqRhineI = 0x0100, /* See comment below */
349 };
350 /*
351 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
352 * MMIO as well as for the collision counter and the Tx FIFO underflow
353 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
354 */
355
356 /* Beware of PCI posted writes */
357 #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
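/* On PCI, reading a device register forces any writes posted ahead of it to
   reach the chip first; IOSYNC reads the (harmless) StationAddr register for
   exactly that purpose. */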
358
359 static struct pci_device_id rhine_pci_tbl[] =
360 {
361 {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT86C100A */
362 {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6102 */
363 {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* 6105{,L,LOM} */
364 {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6105M */
365 { } /* terminate list */
366 };
367 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
368
369
370 /* Offsets to the device registers. */
371 enum register_offsets {
372 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
373 ChipCmd1=0x09,
374 IntrStatus=0x0C, IntrEnable=0x0E,
375 MulticastFilter0=0x10, MulticastFilter1=0x14,
376 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
377 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
378 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
379 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
380 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
381 StickyHW=0x83, IntrStatus2=0x84,
382 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
383 WOLcrClr1=0xA6, WOLcgClr=0xA7,
384 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
385 };
386
387 /* Bits in ConfigD */
388 enum backoff_bits {
389 BackOptional=0x01, BackModify=0x02,
390 BackCaptureEffect=0x04, BackRandom=0x08
391 };
392
393 #ifdef USE_MMIO
394 /* Registers we check to verify that MMIO and PIO accesses read back the same values. */
395 static const int mmio_verify_registers[] = {
396 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
397 0
398 };
399 #endif
400
401 /* Bits in the interrupt status/mask registers. */
402 enum intr_status_bits {
403 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
404 IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
405 IntrPCIErr=0x0040,
406 IntrStatsMax=0x0080, IntrRxEarly=0x0100,
407 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
408 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
409 IntrRxWakeUp=0x8000,
410 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
411 IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
412 IntrTxErrSummary=0x082218,
413 };
414
415 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
416 enum wol_bits {
417 WOLucast = 0x10,
418 WOLmagic = 0x20,
419 WOLbmcast = 0x30,
420 WOLlnkon = 0x40,
421 WOLlnkoff = 0x80,
422 };
423
424 /* The Rx and Tx buffer descriptors. */
425 struct rx_desc {
426 s32 rx_status;
427 u32 desc_length; /* Chain flag, Buffer/frame length */
428 u32 addr;
429 u32 next_desc;
430 };
431 struct tx_desc {
432 s32 tx_status;
433 u32 desc_length; /* Chain flag, Tx Config, Frame length */
434 u32 addr;
435 u32 next_desc;
436 };
437
438 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
439 #define TXDESC 0x00e08000
440
441 enum rx_status_bits {
442 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
443 };
444
445 /* Bits in *_desc.*_status */
446 enum desc_status_bits {
447 DescOwn=0x80000000
448 };
449
450 /* Bits in ChipCmd. */
451 enum chip_cmd_bits {
452 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
453 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
454 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
455 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
456 };
457
458 struct rhine_private {
459 /* Descriptor rings */
460 struct rx_desc *rx_ring;
461 struct tx_desc *tx_ring;
462 dma_addr_t rx_ring_dma;
463 dma_addr_t tx_ring_dma;
464
465 /* The addresses of receive-in-place skbuffs. */
466 struct sk_buff *rx_skbuff[RX_RING_SIZE];
467 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
468
469 /* The saved address of a sent-in-place packet/buffer, for later free(). */
470 struct sk_buff *tx_skbuff[TX_RING_SIZE];
471 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
472
473 /* Tx bounce buffers (Rhine-I only) */
474 unsigned char *tx_buf[TX_RING_SIZE];
475 unsigned char *tx_bufs;
476 dma_addr_t tx_bufs_dma;
477
478 struct pci_dev *pdev;
479 long pioaddr;
480 struct net_device_stats stats;
481 spinlock_t lock;
482
483 /* Frequently used values: keep some adjacent for cache effect. */
484 u32 quirks;
485 struct rx_desc *rx_head_desc;
486 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
487 unsigned int cur_tx, dirty_tx;
488 unsigned int rx_buf_sz; /* Based on MTU+slack. */
489 u8 wolopts;
490
491 u8 tx_thresh, rx_thresh;
492
493 struct mii_if_info mii_if;
494 struct work_struct tx_timeout_task;
495 struct work_struct check_media_task;
496 void __iomem *base;
497 };
498
499 static int mdio_read(struct net_device *dev, int phy_id, int location);
500 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
501 static int rhine_open(struct net_device *dev);
502 static void rhine_tx_timeout(struct net_device *dev);
503 static void rhine_tx_timeout_task(struct net_device *dev);
504 static void rhine_check_media_task(struct net_device *dev);
505 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
506 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
507 static void rhine_tx(struct net_device *dev);
508 static void rhine_rx(struct net_device *dev);
509 static void rhine_error(struct net_device *dev, int intr_status);
510 static void rhine_set_rx_mode(struct net_device *dev);
511 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
512 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
513 static struct ethtool_ops netdev_ethtool_ops;
514 static int rhine_close(struct net_device *dev);
515 static void rhine_shutdown (struct pci_dev *pdev);
516
517 #define RHINE_WAIT_FOR(condition) do { \
518 int i=1024; \
519 while (!(condition) && --i) \
520 ; \
521 if (debug > 1 && i < 512) \
522 printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \
523 DRV_NAME, 1024-i, __func__, __LINE__); \
524 } while(0)
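/*
 * Typical use -- busy-wait for a condition, e.g. for the reset bit to
 * clear (taken from rhine_chip_reset() below):
 *	RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
 */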
525
526 static inline u32 get_intr_status(struct net_device *dev)
527 {
528 struct rhine_private *rp = netdev_priv(dev);
529 void __iomem *ioaddr = rp->base;
530 u32 intr_status;
531
532 intr_status = ioread16(ioaddr + IntrStatus);
533 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
534 if (rp->quirks & rqStatusWBRace)
535 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
536 return intr_status;
537 }
538
539 /*
540 * Get power related registers into sane state.
541 * Notify user about past WOL event.
542 */
543 static void rhine_power_init(struct net_device *dev)
544 {
545 struct rhine_private *rp = netdev_priv(dev);
546 void __iomem *ioaddr = rp->base;
547 u16 wolstat;
548
549 if (rp->quirks & rqWOL) {
550 /* Make sure chip is in power state D0 */
551 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
552
553 /* Disable "force PME-enable" */
554 iowrite8(0x80, ioaddr + WOLcgClr);
555
556 /* Clear power-event config bits (WOL) */
557 iowrite8(0xFF, ioaddr + WOLcrClr);
558 /* More recent cards can manage two additional patterns */
559 if (rp->quirks & rq6patterns)
560 iowrite8(0x03, ioaddr + WOLcrClr1);
561
562 /* Save power-event status bits */
563 wolstat = ioread8(ioaddr + PwrcsrSet);
564 if (rp->quirks & rq6patterns)
565 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
566
567 /* Clear power-event status bits */
568 iowrite8(0xFF, ioaddr + PwrcsrClr);
569 if (rp->quirks & rq6patterns)
570 iowrite8(0x03, ioaddr + PwrcsrClr1);
571
572 if (wolstat) {
573 char *reason;
574 switch (wolstat) {
575 case WOLmagic:
576 reason = "Magic packet";
577 break;
578 case WOLlnkon:
579 reason = "Link went up";
580 break;
581 case WOLlnkoff:
582 reason = "Link went down";
583 break;
584 case WOLucast:
585 reason = "Unicast packet";
586 break;
587 case WOLbmcast:
588 reason = "Multicast/broadcast packet";
589 break;
590 default:
591 reason = "Unknown";
592 }
593 printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
594 DRV_NAME, reason);
595 }
596 }
597 }
598
599 static void rhine_chip_reset(struct net_device *dev)
600 {
601 struct rhine_private *rp = netdev_priv(dev);
602 void __iomem *ioaddr = rp->base;
603
604 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
605 IOSYNC;
606
607 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
608 printk(KERN_INFO "%s: Reset not complete yet. "
609 "Trying harder.\n", DRV_NAME);
610
611 /* Force reset */
612 if (rp->quirks & rqForceReset)
613 iowrite8(0x40, ioaddr + MiscCmd);
614
615 /* Reset can take somewhat longer (rare) */
616 RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
617 }
618
619 if (debug > 1)
620 printk(KERN_INFO "%s: Reset %s.\n", dev->name,
621 (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
622 "failed" : "succeeded");
623 }
624
625 #ifdef USE_MMIO
626 static void enable_mmio(long pioaddr, u32 quirks)
627 {
628 int n;
629 if (quirks & rqRhineI) {
630 /* More recent docs say that this bit is reserved ... */
631 n = inb(pioaddr + ConfigA) | 0x20;
632 outb(n, pioaddr + ConfigA);
633 } else {
634 n = inb(pioaddr + ConfigD) | 0x80;
635 outb(n, pioaddr + ConfigD);
636 }
637 }
638 #endif
639
640 /*
641 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
642 * (plus 0x6C for Rhine-I/II)
643 */
644 static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
645 {
646 struct rhine_private *rp = netdev_priv(dev);
647 void __iomem *ioaddr = rp->base;
648
649 outb(0x20, pioaddr + MACRegEEcsr);
650 RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
651
652 #ifdef USE_MMIO
653 /*
654 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
655 * MMIO. If reloading EEPROM was done first this could be avoided, but
656 * it is not known if that still works with the "win98-reboot" problem.
657 */
658 enable_mmio(pioaddr, rp->quirks);
659 #endif
660
661 /* Turn off EEPROM-controlled wake-up (magic packet) */
662 if (rp->quirks & rqWOL)
663 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
664
665 }
666
667 #ifdef CONFIG_NET_POLL_CONTROLLER
668 static void rhine_poll(struct net_device *dev)
669 {
670 disable_irq(dev->irq);
671 rhine_interrupt(dev->irq, (void *)dev, NULL);
672 enable_irq(dev->irq);
673 }
674 #endif
675
676 static void rhine_hw_init(struct net_device *dev, long pioaddr)
677 {
678 struct rhine_private *rp = netdev_priv(dev);
679
680 /* Reset the chip to erase previous misconfiguration. */
681 rhine_chip_reset(dev);
682
683 /* Rhine-I needs extra time to recuperate before EEPROM reload */
684 if (rp->quirks & rqRhineI)
685 msleep(5);
686
687 /* Reload EEPROM controlled bytes cleared by soft reset */
688 rhine_reload_eeprom(pioaddr, dev);
689 }
690
691 static int __devinit rhine_init_one(struct pci_dev *pdev,
692 const struct pci_device_id *ent)
693 {
694 struct net_device *dev;
695 struct rhine_private *rp;
696 int i, rc;
697 u8 pci_rev;
698 u32 quirks;
699 long pioaddr;
700 long memaddr;
701 void __iomem *ioaddr;
702 int io_size, phy_id;
703 const char *name;
704 #ifdef USE_MMIO
705 int bar = 1;
706 #else
707 int bar = 0;
708 #endif
709
710 /* when built into the kernel, we only print version if device is found */
711 #ifndef MODULE
712 static int printed_version;
713 if (!printed_version++)
714 printk(version);
715 #endif
716
717 pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
718
719 io_size = 256;
720 phy_id = 0;
721 quirks = 0;
722 name = "Rhine";
723 if (pci_rev < VTunknown0) {
724 quirks = rqRhineI;
725 io_size = 128;
726 }
727 else if (pci_rev >= VT6102) {
728 quirks = rqWOL | rqForceReset;
729 if (pci_rev < VT6105) {
730 name = "Rhine II";
731 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
732 }
733 else {
734 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
735 if (pci_rev >= VT6105_B0)
736 quirks |= rq6patterns;
737 if (pci_rev < VT6105M)
738 name = "Rhine III";
739 else
740 name = "Rhine III (Management Adapter)";
741 }
742 }
743
744 rc = pci_enable_device(pdev);
745 if (rc)
746 goto err_out;
747
748 /* this should always be supported */
749 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
750 if (rc) {
751 printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
752 "the card!?\n");
753 goto err_out;
754 }
755
756 /* sanity check */
757 if ((pci_resource_len(pdev, 0) < io_size) ||
758 (pci_resource_len(pdev, 1) < io_size)) {
759 rc = -EIO;
760 printk(KERN_ERR "Insufficient PCI resources, aborting\n");
761 goto err_out;
762 }
763
764 pioaddr = pci_resource_start(pdev, 0);
765 memaddr = pci_resource_start(pdev, 1);
766
767 pci_set_master(pdev);
768
769 dev = alloc_etherdev(sizeof(struct rhine_private));
770 if (!dev) {
771 rc = -ENOMEM;
772 printk(KERN_ERR "alloc_etherdev failed\n");
773 goto err_out;
774 }
775 SET_MODULE_OWNER(dev);
776 SET_NETDEV_DEV(dev, &pdev->dev);
777
778 rp = netdev_priv(dev);
779 rp->quirks = quirks;
780 rp->pioaddr = pioaddr;
781 rp->pdev = pdev;
782
783 rc = pci_request_regions(pdev, DRV_NAME);
784 if (rc)
785 goto err_out_free_netdev;
786
787 ioaddr = pci_iomap(pdev, bar, io_size);
788 if (!ioaddr) {
789 rc = -EIO;
790 printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
791 "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
792 goto err_out_free_res;
793 }
794
795 #ifdef USE_MMIO
796 enable_mmio(pioaddr, quirks);
797
798 /* Check that selected MMIO registers match the PIO ones */
799 i = 0;
800 while (mmio_verify_registers[i]) {
801 int reg = mmio_verify_registers[i++];
802 unsigned char a = inb(pioaddr+reg);
803 unsigned char b = readb(ioaddr+reg);
804 if (a != b) {
805 rc = -EIO;
806 printk(KERN_ERR "MMIO do not match PIO [%02x] "
807 "(%02x != %02x)\n", reg, a, b);
808 goto err_out_unmap;
809 }
810 }
811 #endif /* USE_MMIO */
812
813 dev->base_addr = (unsigned long)ioaddr;
814 rp->base = ioaddr;
815
816 /* Get chip registers into a sane state */
817 rhine_power_init(dev);
818 rhine_hw_init(dev, pioaddr);
819
820 for (i = 0; i < 6; i++)
821 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
822 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
823
824 if (!is_valid_ether_addr(dev->perm_addr)) {
825 rc = -EIO;
826 printk(KERN_ERR "Invalid MAC address\n");
827 goto err_out_unmap;
828 }
829
830 /* For Rhine-I/II, phy_id is loaded from EEPROM */
831 if (!phy_id)
832 phy_id = ioread8(ioaddr + 0x6C);
833
834 dev->irq = pdev->irq;
835
836 spin_lock_init(&rp->lock);
837 rp->mii_if.dev = dev;
838 rp->mii_if.mdio_read = mdio_read;
839 rp->mii_if.mdio_write = mdio_write;
840 rp->mii_if.phy_id_mask = 0x1f;
841 rp->mii_if.reg_num_mask = 0x1f;
842
843 /* The chip-specific entries in the device structure. */
844 dev->open = rhine_open;
845 dev->hard_start_xmit = rhine_start_tx;
846 dev->stop = rhine_close;
847 dev->get_stats = rhine_get_stats;
848 dev->set_multicast_list = rhine_set_rx_mode;
849 dev->do_ioctl = netdev_ioctl;
850 dev->ethtool_ops = &netdev_ethtool_ops;
851 dev->tx_timeout = rhine_tx_timeout;
852 dev->watchdog_timeo = TX_TIMEOUT;
853 #ifdef CONFIG_NET_POLL_CONTROLLER
854 dev->poll_controller = rhine_poll;
855 #endif
856 if (rp->quirks & rqRhineI)
857 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
858
859 INIT_WORK(&rp->tx_timeout_task,
860 (void (*)(void *))rhine_tx_timeout_task, dev);
861
862 INIT_WORK(&rp->check_media_task,
863 (void (*)(void *))rhine_check_media_task, dev);
864
865 /* dev->name not defined before register_netdev()! */
866 rc = register_netdev(dev);
867 if (rc)
868 goto err_out_unmap;
869
870 printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
871 dev->name, name,
872 #ifdef USE_MMIO
873 memaddr
874 #else
875 (long)ioaddr
876 #endif
877 );
878
879 for (i = 0; i < 5; i++)
880 printk("%2.2x:", dev->dev_addr[i]);
881 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);
882
883 pci_set_drvdata(pdev, dev);
884
885 {
886 u16 mii_cmd;
887 int mii_status = mdio_read(dev, phy_id, 1);
888 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
889 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
890 if (mii_status != 0xffff && mii_status != 0x0000) {
891 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
892 printk(KERN_INFO "%s: MII PHY found at address "
893 "%d, status 0x%4.4x advertising %4.4x "
894 "Link %4.4x.\n", dev->name, phy_id,
895 mii_status, rp->mii_if.advertising,
896 mdio_read(dev, phy_id, 5));
897
898 /* set IFF_RUNNING */
899 if (mii_status & BMSR_LSTATUS)
900 netif_carrier_on(dev);
901 else
902 netif_carrier_off(dev);
903
904 }
905 }
906 rp->mii_if.phy_id = phy_id;
907
908 return 0;
909
910 err_out_unmap:
911 pci_iounmap(pdev, ioaddr);
912 err_out_free_res:
913 pci_release_regions(pdev);
914 err_out_free_netdev:
915 free_netdev(dev);
916 err_out:
917 return rc;
918 }
919
920 static int alloc_ring(struct net_device* dev)
921 {
922 struct rhine_private *rp = netdev_priv(dev);
923 void *ring;
924 dma_addr_t ring_dma;
925
926 ring = pci_alloc_consistent(rp->pdev,
927 RX_RING_SIZE * sizeof(struct rx_desc) +
928 TX_RING_SIZE * sizeof(struct tx_desc),
929 &ring_dma);
930 if (!ring) {
931 printk(KERN_ERR "Could not allocate DMA memory.\n");
932 return -ENOMEM;
933 }
934 if (rp->quirks & rqRhineI) {
935 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
936 PKT_BUF_SZ * TX_RING_SIZE,
937 &rp->tx_bufs_dma);
938 if (rp->tx_bufs == NULL) {
939 pci_free_consistent(rp->pdev,
940 RX_RING_SIZE * sizeof(struct rx_desc) +
941 TX_RING_SIZE * sizeof(struct tx_desc),
942 ring, ring_dma);
943 return -ENOMEM;
944 }
945 }
946
947 rp->rx_ring = ring;
948 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
949 rp->rx_ring_dma = ring_dma;
950 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
951
952 return 0;
953 }
954
955 static void free_ring(struct net_device* dev)
956 {
957 struct rhine_private *rp = netdev_priv(dev);
958
959 pci_free_consistent(rp->pdev,
960 RX_RING_SIZE * sizeof(struct rx_desc) +
961 TX_RING_SIZE * sizeof(struct tx_desc),
962 rp->rx_ring, rp->rx_ring_dma);
963 rp->tx_ring = NULL;
964
965 if (rp->tx_bufs)
966 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
967 rp->tx_bufs, rp->tx_bufs_dma);
968
969 rp->tx_bufs = NULL;
970
971 }
972
973 static void alloc_rbufs(struct net_device *dev)
974 {
975 struct rhine_private *rp = netdev_priv(dev);
976 dma_addr_t next;
977 int i;
978
979 rp->dirty_rx = rp->cur_rx = 0;
980
981 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
982 rp->rx_head_desc = &rp->rx_ring[0];
983 next = rp->rx_ring_dma;
984
985 /* Init the ring entries */
986 for (i = 0; i < RX_RING_SIZE; i++) {
987 rp->rx_ring[i].rx_status = 0;
988 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
989 next += sizeof(struct rx_desc);
990 rp->rx_ring[i].next_desc = cpu_to_le32(next);
991 rp->rx_skbuff[i] = NULL;
992 }
993 /* Mark the last entry as wrapping the ring. */
994 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
995
996 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
997 for (i = 0; i < RX_RING_SIZE; i++) {
998 struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
999 rp->rx_skbuff[i] = skb;
1000 if (skb == NULL)
1001 break;
1002 skb->dev = dev; /* Mark as being used by this device. */
1003
1004 rp->rx_skbuff_dma[i] =
1005 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1006 PCI_DMA_FROMDEVICE);
1007
1008 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1009 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1010 }
1011 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1012 }
1013
1014 static void free_rbufs(struct net_device* dev)
1015 {
1016 struct rhine_private *rp = netdev_priv(dev);
1017 int i;
1018
1019 /* Free all the skbuffs in the Rx queue. */
1020 for (i = 0; i < RX_RING_SIZE; i++) {
1021 rp->rx_ring[i].rx_status = 0;
1022 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1023 if (rp->rx_skbuff[i]) {
1024 pci_unmap_single(rp->pdev,
1025 rp->rx_skbuff_dma[i],
1026 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1027 dev_kfree_skb(rp->rx_skbuff[i]);
1028 }
1029 rp->rx_skbuff[i] = NULL;
1030 }
1031 }
1032
1033 static void alloc_tbufs(struct net_device* dev)
1034 {
1035 struct rhine_private *rp = netdev_priv(dev);
1036 dma_addr_t next;
1037 int i;
1038
1039 rp->dirty_tx = rp->cur_tx = 0;
1040 next = rp->tx_ring_dma;
1041 for (i = 0; i < TX_RING_SIZE; i++) {
1042 rp->tx_skbuff[i] = NULL;
1043 rp->tx_ring[i].tx_status = 0;
1044 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1045 next += sizeof(struct tx_desc);
1046 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1047 if (rp->quirks & rqRhineI)
1048 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1049 }
1050 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1051
1052 }
1053
1054 static void free_tbufs(struct net_device* dev)
1055 {
1056 struct rhine_private *rp = netdev_priv(dev);
1057 int i;
1058
1059 for (i = 0; i < TX_RING_SIZE; i++) {
1060 rp->tx_ring[i].tx_status = 0;
1061 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1062 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1063 if (rp->tx_skbuff[i]) {
1064 if (rp->tx_skbuff_dma[i]) {
1065 pci_unmap_single(rp->pdev,
1066 rp->tx_skbuff_dma[i],
1067 rp->tx_skbuff[i]->len,
1068 PCI_DMA_TODEVICE);
1069 }
1070 dev_kfree_skb(rp->tx_skbuff[i]);
1071 }
1072 rp->tx_skbuff[i] = NULL;
1073 rp->tx_buf[i] = NULL;
1074 }
1075 }
1076
1077 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1078 {
1079 struct rhine_private *rp = netdev_priv(dev);
1080 void __iomem *ioaddr = rp->base;
1081
1082 mii_check_media(&rp->mii_if, debug, init_media);
1083
1084 if (rp->mii_if.full_duplex)
1085 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1086 ioaddr + ChipCmd1);
1087 else
1088 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1089 ioaddr + ChipCmd1);
1090 if (debug > 1)
1091 printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
1092 rp->mii_if.force_media, netif_carrier_ok(dev));
1093 }
1094
1095 /* Called after status of force_media possibly changed */
1096 static void rhine_set_carrier(struct mii_if_info *mii)
1097 {
1098 if (mii->force_media) {
1099 /* autoneg is off: Link is always assumed to be up */
1100 if (!netif_carrier_ok(mii->dev))
1101 netif_carrier_on(mii->dev);
1102 }
1103 else /* Let MII library update carrier status */
1104 rhine_check_media(mii->dev, 0);
1105 if (debug > 1)
1106 printk(KERN_INFO "%s: force_media %d, carrier %d\n",
1107 mii->dev->name, mii->force_media,
1108 netif_carrier_ok(mii->dev));
1109 }
1110
1111 static void rhine_check_media_task(struct net_device *dev)
1112 {
1113 rhine_check_media(dev, 0);
1114 }
1115
1116 static void init_registers(struct net_device *dev)
1117 {
1118 struct rhine_private *rp = netdev_priv(dev);
1119 void __iomem *ioaddr = rp->base;
1120 int i;
1121
1122 for (i = 0; i < 6; i++)
1123 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1124
1125 /* Initialize other registers. */
1126 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1127 /* Configure initial FIFO thresholds. */
1128 iowrite8(0x20, ioaddr + TxConfig);
1129 rp->tx_thresh = 0x20;
1130 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1131
1132 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1133 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1134
1135 rhine_set_rx_mode(dev);
1136
1137 /* Enable interrupts by setting the interrupt mask. */
1138 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1139 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1140 IntrTxDone | IntrTxError | IntrTxUnderrun |
1141 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1142 ioaddr + IntrEnable);
1143
1144 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1145 ioaddr + ChipCmd);
1146 rhine_check_media(dev, 1);
1147 }
1148
1149 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1150 static void rhine_enable_linkmon(void __iomem *ioaddr)
1151 {
1152 iowrite8(0, ioaddr + MIICmd);
1153 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1154 iowrite8(0x80, ioaddr + MIICmd);
1155
1156 RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
1157
1158 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1159 }
1160
1161 /* Disable MII link status auto-polling (required for MDIO access) */
1162 static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
1163 {
1164 iowrite8(0, ioaddr + MIICmd);
1165
1166 if (quirks & rqRhineI) {
1167 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1168
1169 /* Do not call from ISR! */
1170 msleep(1);
1171
1172 /* 0x80 must be set immediately before turning it off */
1173 iowrite8(0x80, ioaddr + MIICmd);
1174
1175 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
1176
1177 /* Heh. Now clear 0x80 again. */
1178 iowrite8(0, ioaddr + MIICmd);
1179 }
1180 else
1181 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
1182 }
1183
1184 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1185
1186 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1187 {
1188 struct rhine_private *rp = netdev_priv(dev);
1189 void __iomem *ioaddr = rp->base;
1190 int result;
1191
1192 rhine_disable_linkmon(ioaddr, rp->quirks);
1193
1194 /* rhine_disable_linkmon already cleared MIICmd */
1195 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1196 iowrite8(regnum, ioaddr + MIIRegAddr);
1197 iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
1198 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
1199 result = ioread16(ioaddr + MIIData);
1200
1201 rhine_enable_linkmon(ioaddr);
1202 return result;
1203 }
1204
1205 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1206 {
1207 struct rhine_private *rp = netdev_priv(dev);
1208 void __iomem *ioaddr = rp->base;
1209
1210 rhine_disable_linkmon(ioaddr, rp->quirks);
1211
1212 /* rhine_disable_linkmon already cleared MIICmd */
1213 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1214 iowrite8(regnum, ioaddr + MIIRegAddr);
1215 iowrite16(value, ioaddr + MIIData);
1216 iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
1217 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
1218
1219 rhine_enable_linkmon(ioaddr);
1220 }
1221
1222 static int rhine_open(struct net_device *dev)
1223 {
1224 struct rhine_private *rp = netdev_priv(dev);
1225 void __iomem *ioaddr = rp->base;
1226 int rc;
1227
1228 rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
1229 dev);
1230 if (rc)
1231 return rc;
1232
1233 if (debug > 1)
1234 printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
1235 dev->name, rp->pdev->irq);
1236
1237 rc = alloc_ring(dev);
1238 if (rc) {
1239 free_irq(rp->pdev->irq, dev);
1240 return rc;
1241 }
1242 alloc_rbufs(dev);
1243 alloc_tbufs(dev);
1244 rhine_chip_reset(dev);
1245 init_registers(dev);
1246 if (debug > 2)
1247 printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
1248 "MII status: %4.4x.\n",
1249 dev->name, ioread16(ioaddr + ChipCmd),
1250 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1251
1252 netif_start_queue(dev);
1253
1254 return 0;
1255 }
1256
1257 static void rhine_tx_timeout(struct net_device *dev)
1258 {
1259 struct rhine_private *rp = netdev_priv(dev);
1260
1261 /*
1262 * Move bulk of work outside of interrupt context
1263 */
1264 schedule_work(&rp->tx_timeout_task);
1265 }
1266
1267 static void rhine_tx_timeout_task(struct net_device *dev)
1268 {
1269 struct rhine_private *rp = netdev_priv(dev);
1270 void __iomem *ioaddr = rp->base;
1271
1272 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1273 "%4.4x, resetting...\n",
1274 dev->name, ioread16(ioaddr + IntrStatus),
1275 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1276
1277 /* protect against concurrent rx interrupts */
1278 disable_irq(rp->pdev->irq);
1279
1280 spin_lock(&rp->lock);
1281
1282 /* clear all descriptors */
1283 free_tbufs(dev);
1284 free_rbufs(dev);
1285 alloc_tbufs(dev);
1286 alloc_rbufs(dev);
1287
1288 /* Reinitialize the hardware. */
1289 rhine_chip_reset(dev);
1290 init_registers(dev);
1291
1292 spin_unlock(&rp->lock);
1293 enable_irq(rp->pdev->irq);
1294
1295 dev->trans_start = jiffies;
1296 rp->stats.tx_errors++;
1297 netif_wake_queue(dev);
1298 }
1299
1300 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1301 {
1302 struct rhine_private *rp = netdev_priv(dev);
1303 void __iomem *ioaddr = rp->base;
1304 unsigned entry;
1305
1306 /* Caution: the write order is important here, set the field
1307 with the "ownership" bits last. */
1308
1309 /* Calculate the next Tx descriptor entry. */
1310 entry = rp->cur_tx % TX_RING_SIZE;
1311
1312 if (skb->len < ETH_ZLEN) {
1313 skb = skb_padto(skb, ETH_ZLEN);
1314 if (skb == NULL)
1315 return 0;
1316 }
1317
1318 rp->tx_skbuff[entry] = skb;
1319
1320 if ((rp->quirks & rqRhineI) &&
1321 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
1322 /* Must use alignment buffer. */
1323 if (skb->len > PKT_BUF_SZ) {
1324 /* packet too long, drop it */
1325 dev_kfree_skb(skb);
1326 rp->tx_skbuff[entry] = NULL;
1327 rp->stats.tx_dropped++;
1328 return 0;
1329 }
1330
1331 /* Padding is not copied and so must be redone. */
1332 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1333 if (skb->len < ETH_ZLEN)
1334 memset(rp->tx_buf[entry] + skb->len, 0,
1335 ETH_ZLEN - skb->len);
1336 rp->tx_skbuff_dma[entry] = 0;
1337 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1338 (rp->tx_buf[entry] -
1339 rp->tx_bufs));
1340 } else {
1341 rp->tx_skbuff_dma[entry] =
1342 pci_map_single(rp->pdev, skb->data, skb->len,
1343 PCI_DMA_TODEVICE);
1344 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1345 }
1346
1347 rp->tx_ring[entry].desc_length =
1348 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1349
1350 /* lock eth irq */
1351 spin_lock_irq(&rp->lock);
1352 wmb();
1353 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1354 wmb();
1355
1356 rp->cur_tx++;
1357
1358 /* Non-x86 Todo: explicitly flush cache lines here. */
1359
1360 /* Wake the potentially-idle transmit channel */
1361 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1362 ioaddr + ChipCmd1);
1363 IOSYNC;
1364
1365 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1366 netif_stop_queue(dev);
1367
1368 dev->trans_start = jiffies;
1369
1370 spin_unlock_irq(&rp->lock);
1371
1372 if (debug > 4) {
1373 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1374 dev->name, rp->cur_tx-1, entry);
1375 }
1376 return 0;
1377 }
1378
1379 /* The interrupt handler does all of the Rx thread work and cleans up
1380 after the Tx thread. */
1381 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1382 {
1383 struct net_device *dev = dev_instance;
1384 struct rhine_private *rp = netdev_priv(dev);
1385 void __iomem *ioaddr = rp->base;
1386 u32 intr_status;
1387 int boguscnt = max_interrupt_work;
1388 int handled = 0;
1389
1390 while ((intr_status = get_intr_status(dev))) {
1391 handled = 1;
1392
1393 /* Acknowledge all of the current interrupt sources ASAP. */
1394 if (intr_status & IntrTxDescRace)
1395 iowrite8(0x08, ioaddr + IntrStatus2);
1396 iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1397 IOSYNC;
1398
1399 if (debug > 4)
1400 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1401 dev->name, intr_status);
1402
1403 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1404 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
1405 rhine_rx(dev);
1406
1407 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1408 if (intr_status & IntrTxErrSummary) {
1409 /* Avoid scavenging before Tx engine turned off */
1410 RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1411 if (debug > 2 &&
1412 ioread8(ioaddr+ChipCmd) & CmdTxOn)
1413 printk(KERN_WARNING "%s: "
1414 "rhine_interrupt() Tx engine"
1415 "still on.\n", dev->name);
1416 }
1417 rhine_tx(dev);
1418 }
1419
1420 /* Abnormal error summary/uncommon events handlers. */
1421 if (intr_status & (IntrPCIErr | IntrLinkChange |
1422 IntrStatsMax | IntrTxError | IntrTxAborted |
1423 IntrTxUnderrun | IntrTxDescRace))
1424 rhine_error(dev, intr_status);
1425
1426 if (--boguscnt < 0) {
1427 printk(KERN_WARNING "%s: Too much work at interrupt, "
1428 "status=%#8.8x.\n",
1429 dev->name, intr_status);
1430 break;
1431 }
1432 }
1433
1434 if (debug > 3)
1435 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1436 dev->name, ioread16(ioaddr + IntrStatus));
1437 return IRQ_RETVAL(handled);
1438 }
1439
1440 /* This routine is logically part of the interrupt handler, but isolated
1441 for clarity. */
1442 static void rhine_tx(struct net_device *dev)
1443 {
1444 struct rhine_private *rp = netdev_priv(dev);
1445 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1446
1447 spin_lock(&rp->lock);
1448
1449 /* find and cleanup dirty tx descriptors */
1450 while (rp->dirty_tx != rp->cur_tx) {
1451 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1452 if (debug > 6)
1453 printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
1454 entry, txstatus);
1455 if (txstatus & DescOwn)
1456 break;
1457 if (txstatus & 0x8000) {
1458 if (debug > 1)
1459 printk(KERN_DEBUG "%s: Transmit error, "
1460 "Tx status %8.8x.\n",
1461 dev->name, txstatus);
1462 rp->stats.tx_errors++;
1463 if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
1464 if (txstatus & 0x0200) rp->stats.tx_window_errors++;
1465 if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
1466 if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
1467 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1468 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1469 rp->stats.tx_fifo_errors++;
1470 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1471 break; /* Keep the skb - we try again */
1472 }
1473 /* Transmitter restarted in 'abnormal' handler. */
1474 } else {
1475 if (rp->quirks & rqRhineI)
1476 rp->stats.collisions += (txstatus >> 3) & 0x0F;
1477 else
1478 rp->stats.collisions += txstatus & 0x0F;
1479 if (debug > 6)
1480 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1481 (txstatus >> 3) & 0xF,
1482 txstatus & 0xF);
1483 rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1484 rp->stats.tx_packets++;
1485 }
1486 /* Free the original skb. */
1487 if (rp->tx_skbuff_dma[entry]) {
1488 pci_unmap_single(rp->pdev,
1489 rp->tx_skbuff_dma[entry],
1490 rp->tx_skbuff[entry]->len,
1491 PCI_DMA_TODEVICE);
1492 }
1493 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1494 rp->tx_skbuff[entry] = NULL;
1495 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1496 }
1497 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1498 netif_wake_queue(dev);
1499
1500 spin_unlock(&rp->lock);
1501 }
1502
1503 /* This routine is logically part of the interrupt handler, but isolated
1504 for clarity and better register allocation. */
1505 static void rhine_rx(struct net_device *dev)
1506 {
1507 struct rhine_private *rp = netdev_priv(dev);
1508 int entry = rp->cur_rx % RX_RING_SIZE;
1509 int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
1510
1511 if (debug > 4) {
1512 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
1513 dev->name, entry,
1514 le32_to_cpu(rp->rx_head_desc->rx_status));
1515 }
1516
1517 /* If the chip has released ownership of the next entry, a complete packet has arrived. Send it up. */
1518 while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
1519 struct rx_desc *desc = rp->rx_head_desc;
1520 u32 desc_status = le32_to_cpu(desc->rx_status);
1521 int data_size = desc_status >> 16;
1522
1523 if (debug > 4)
1524 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
1525 desc_status);
1526 if (--boguscnt < 0)
1527 break;
1528 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1529 if ((desc_status & RxWholePkt) != RxWholePkt) {
1530 printk(KERN_WARNING "%s: Oversized Ethernet "
1531 "frame spanned multiple buffers, entry "
1532 "%#x length %d status %8.8x!\n",
1533 dev->name, entry, data_size,
1534 desc_status);
1535 printk(KERN_WARNING "%s: Oversized Ethernet "
1536 "frame %p vs %p.\n", dev->name,
1537 rp->rx_head_desc, &rp->rx_ring[entry]);
1538 rp->stats.rx_length_errors++;
1539 } else if (desc_status & RxErr) {
1540 /* There was an error. */
1541 if (debug > 2)
1542 printk(KERN_DEBUG "rhine_rx() Rx "
1543 "error was %8.8x.\n",
1544 desc_status);
1545 rp->stats.rx_errors++;
1546 if (desc_status & 0x0030) rp->stats.rx_length_errors++;
1547 if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
1548 if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
1549 if (desc_status & 0x0002) {
1550 /* this can also be updated outside the interrupt handler */
1551 spin_lock(&rp->lock);
1552 rp->stats.rx_crc_errors++;
1553 spin_unlock(&rp->lock);
1554 }
1555 }
1556 } else {
1557 struct sk_buff *skb;
1558 /* Length should omit the CRC */
1559 int pkt_len = data_size - 4;
1560
1561 /* Check if the packet is small enough to be worth copying into a
1562 freshly allocated, minimally-sized skbuff. */
1563 if (pkt_len < rx_copybreak &&
1564 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1565 skb->dev = dev;
1566 skb_reserve(skb, 2); /* 16 byte align the IP header */
1567 pci_dma_sync_single_for_cpu(rp->pdev,
1568 rp->rx_skbuff_dma[entry],
1569 rp->rx_buf_sz,
1570 PCI_DMA_FROMDEVICE);
1571
1572 eth_copy_and_sum(skb,
1573 rp->rx_skbuff[entry]->data,
1574 pkt_len, 0);
1575 skb_put(skb, pkt_len);
1576 pci_dma_sync_single_for_device(rp->pdev,
1577 rp->rx_skbuff_dma[entry],
1578 rp->rx_buf_sz,
1579 PCI_DMA_FROMDEVICE);
1580 } else {
1581 skb = rp->rx_skbuff[entry];
1582 if (skb == NULL) {
1583 printk(KERN_ERR "%s: Inconsistent Rx "
1584 "descriptor chain.\n",
1585 dev->name);
1586 break;
1587 }
1588 rp->rx_skbuff[entry] = NULL;
1589 skb_put(skb, pkt_len);
1590 pci_unmap_single(rp->pdev,
1591 rp->rx_skbuff_dma[entry],
1592 rp->rx_buf_sz,
1593 PCI_DMA_FROMDEVICE);
1594 }
1595 skb->protocol = eth_type_trans(skb, dev);
1596 netif_rx(skb);
1597 dev->last_rx = jiffies;
1598 rp->stats.rx_bytes += pkt_len;
1599 rp->stats.rx_packets++;
1600 }
1601 entry = (++rp->cur_rx) % RX_RING_SIZE;
1602 rp->rx_head_desc = &rp->rx_ring[entry];
1603 }
1604
1605 /* Refill the Rx ring buffers. */
1606 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1607 struct sk_buff *skb;
1608 entry = rp->dirty_rx % RX_RING_SIZE;
1609 if (rp->rx_skbuff[entry] == NULL) {
1610 skb = dev_alloc_skb(rp->rx_buf_sz);
1611 rp->rx_skbuff[entry] = skb;
1612 if (skb == NULL)
1613 break; /* Better luck next round. */
1614 skb->dev = dev; /* Mark as being used by this device. */
1615 rp->rx_skbuff_dma[entry] =
1616 pci_map_single(rp->pdev, skb->data,
1617 rp->rx_buf_sz,
1618 PCI_DMA_FROMDEVICE);
1619 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1620 }
1621 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1622 }
1623 }
1624
1625 /*
1626 * Clears the "tally counters" for CRC errors and missed frames(?).
1627 * It has been reported that some chips need a write of 0 to clear
1628 * these, for others the counters are set to 1 when written to and
1629 * instead cleared when read. So we clear them both ways ...
1630 */
1631 static inline void clear_tally_counters(void __iomem *ioaddr)
1632 {
1633 iowrite32(0, ioaddr + RxMissed);
1634 ioread16(ioaddr + RxCRCErrs);
1635 ioread16(ioaddr + RxMissed);
1636 }
1637
1638 static void rhine_restart_tx(struct net_device *dev) {
1639 struct rhine_private *rp = netdev_priv(dev);
1640 void __iomem *ioaddr = rp->base;
1641 int entry = rp->dirty_tx % TX_RING_SIZE;
1642 u32 intr_status;
1643
1644 /*
1645 * If new errors occurred, we need to sort them out before doing Tx.
1646 * In that case the ISR will be back here RSN anyway.
1647 */
1648 intr_status = get_intr_status(dev);
1649
1650 if ((intr_status & IntrTxErrSummary) == 0) {
1651
1652 /* We know better than the chip where it should continue. */
1653 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1654 ioaddr + TxRingPtr);
1655
1656 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1657 ioaddr + ChipCmd);
1658 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1659 ioaddr + ChipCmd1);
1660 IOSYNC;
1661 }
1662 else {
1663 /* This should never happen */
1664 if (debug > 1)
1665 printk(KERN_WARNING "%s: rhine_restart_tx() "
1666 "Another error occured %8.8x.\n",
1667 dev->name, intr_status);
1668 }
1669
1670 }
1671
1672 static void rhine_error(struct net_device *dev, int intr_status)
1673 {
1674 struct rhine_private *rp = netdev_priv(dev);
1675 void __iomem *ioaddr = rp->base;
1676
1677 spin_lock(&rp->lock);
1678
1679 if (intr_status & IntrLinkChange)
1680 schedule_work(&rp->check_media_task);
1681 if (intr_status & IntrStatsMax) {
1682 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1683 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1684 clear_tally_counters(ioaddr);
1685 }
1686 if (intr_status & IntrTxAborted) {
1687 if (debug > 1)
1688 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1689 dev->name, intr_status);
1690 }
1691 if (intr_status & IntrTxUnderrun) {
1692 if (rp->tx_thresh < 0xE0)
1693 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1694 if (debug > 1)
1695 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1696 "threshold now %2.2x.\n",
1697 dev->name, rp->tx_thresh);
1698 }
1699 if (intr_status & IntrTxDescRace) {
1700 if (debug > 2)
1701 printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
1702 dev->name);
1703 }
1704 if ((intr_status & IntrTxError) &&
1705 (intr_status & (IntrTxAborted |
1706 IntrTxUnderrun | IntrTxDescRace)) == 0) {
1707 if (rp->tx_thresh < 0xE0) {
1708 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1709 }
1710 if (debug > 1)
1711 printk(KERN_INFO "%s: Unspecified error. Tx "
1712 "threshold now %2.2x.\n",
1713 dev->name, rp->tx_thresh);
1714 }
1715 if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1716 IntrTxError))
1717 rhine_restart_tx(dev);
1718
1719 if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1720 IntrTxError | IntrTxAborted | IntrNormalSummary |
1721 IntrTxDescRace)) {
1722 if (debug > 1)
1723 printk(KERN_ERR "%s: Something Wicked happened! "
1724 "%8.8x.\n", dev->name, intr_status);
1725 }
1726
1727 spin_unlock(&rp->lock);
1728 }
1729
1730 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1731 {
1732 struct rhine_private *rp = netdev_priv(dev);
1733 void __iomem *ioaddr = rp->base;
1734 unsigned long flags;
1735
1736 spin_lock_irqsave(&rp->lock, flags);
1737 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1738 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1739 clear_tally_counters(ioaddr);
1740 spin_unlock_irqrestore(&rp->lock, flags);
1741
1742 return &rp->stats;
1743 }
1744
1745 static void rhine_set_rx_mode(struct net_device *dev)
1746 {
1747 struct rhine_private *rp = netdev_priv(dev);
1748 void __iomem *ioaddr = rp->base;
1749 u32 mc_filter[2]; /* Multicast hash filter */
1750 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1751
1752 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1753 /* Unconditionally log net taps. */
1754 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
1755 dev->name);
1756 rx_mode = 0x1C;
1757 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1758 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1759 } else if ((dev->mc_count > multicast_filter_limit)
1760 || (dev->flags & IFF_ALLMULTI)) {
1761 /* Too many to match, or accept all multicasts. */
1762 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1763 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1764 rx_mode = 0x0C;
1765 } else {
1766 struct dev_mc_list *mclist;
1767 int i;
1768 memset(mc_filter, 0, sizeof(mc_filter));
1769 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1770 i++, mclist = mclist->next) {
1771 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1772
1773 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1774 }
1775 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1776 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1777 rx_mode = 0x0C;
1778 }
1779 iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
1780 }
1781
1782 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1783 {
1784 struct rhine_private *rp = netdev_priv(dev);
1785
1786 strcpy(info->driver, DRV_NAME);
1787 strcpy(info->version, DRV_VERSION);
1788 strcpy(info->bus_info, pci_name(rp->pdev));
1789 }
1790
1791 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1792 {
1793 struct rhine_private *rp = netdev_priv(dev);
1794 int rc;
1795
1796 spin_lock_irq(&rp->lock);
1797 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1798 spin_unlock_irq(&rp->lock);
1799
1800 return rc;
1801 }
1802
1803 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1804 {
1805 struct rhine_private *rp = netdev_priv(dev);
1806 int rc;
1807
1808 spin_lock_irq(&rp->lock);
1809 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1810 spin_unlock_irq(&rp->lock);
1811 rhine_set_carrier(&rp->mii_if);
1812
1813 return rc;
1814 }
1815
1816 static int netdev_nway_reset(struct net_device *dev)
1817 {
1818 struct rhine_private *rp = netdev_priv(dev);
1819
1820 return mii_nway_restart(&rp->mii_if);
1821 }
1822
1823 static u32 netdev_get_link(struct net_device *dev)
1824 {
1825 struct rhine_private *rp = netdev_priv(dev);
1826
1827 return mii_link_ok(&rp->mii_if);
1828 }
1829
1830 static u32 netdev_get_msglevel(struct net_device *dev)
1831 {
1832 return debug;
1833 }
1834
1835 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1836 {
1837 debug = value;
1838 }
1839
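/*
 * Wake-on-LAN is only reported and accepted on chips with the rqWOL
 * quirk; the selected wake-up events are actually armed in
 * rhine_shutdown().
 */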
1840 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1841 {
1842 struct rhine_private *rp = netdev_priv(dev);
1843
1844 if (!(rp->quirks & rqWOL))
1845 return;
1846
1847 spin_lock_irq(&rp->lock);
1848 wol->supported = WAKE_PHY | WAKE_MAGIC |
1849 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1850 wol->wolopts = rp->wolopts;
1851 spin_unlock_irq(&rp->lock);
1852 }
1853
1854 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1855 {
1856 struct rhine_private *rp = netdev_priv(dev);
1857 u32 support = WAKE_PHY | WAKE_MAGIC |
1858 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1859
1860 if (!(rp->quirks & rqWOL))
1861 return -EINVAL;
1862
1863 if (wol->wolopts & ~support)
1864 return -EINVAL;
1865
1866 spin_lock_irq(&rp->lock);
1867 rp->wolopts = wol->wolopts;
1868 spin_unlock_irq(&rp->lock);
1869
1870 return 0;
1871 }
1872
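/* ethtool support, largely backed by the generic MII library. */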
1873 static struct ethtool_ops netdev_ethtool_ops = {
1874 .get_drvinfo = netdev_get_drvinfo,
1875 .get_settings = netdev_get_settings,
1876 .set_settings = netdev_set_settings,
1877 .nway_reset = netdev_nway_reset,
1878 .get_link = netdev_get_link,
1879 .get_msglevel = netdev_get_msglevel,
1880 .set_msglevel = netdev_set_msglevel,
1881 .get_wol = rhine_get_wol,
1882 .set_wol = rhine_set_wol,
1883 .get_sg = ethtool_op_get_sg,
1884 .get_tx_csum = ethtool_op_get_tx_csum,
1885 .get_perm_addr = ethtool_op_get_perm_addr,
1886 };
1887
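/* MII ioctls are handed to the generic MII layer under the device lock. */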
1888 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1889 {
1890 struct rhine_private *rp = netdev_priv(dev);
1891 int rc;
1892
1893 if (!netif_running(dev))
1894 return -EINVAL;
1895
1896 spin_lock_irq(&rp->lock);
1897 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1898 spin_unlock_irq(&rp->lock);
1899 rhine_set_carrier(&rp->mii_if);
1900
1901 return rc;
1902 }
1903
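/*
 * Stop the interface: quiesce the transmitter, mask interrupts, halt the
 * chip, free the IRQ, flush the deferred media-check work and release
 * all descriptor rings and buffers.
 */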
1904 static int rhine_close(struct net_device *dev)
1905 {
1906 struct rhine_private *rp = netdev_priv(dev);
1907 void __iomem *ioaddr = rp->base;
1908
1909 spin_lock_irq(&rp->lock);
1910
1911 netif_stop_queue(dev);
1912
1913 if (debug > 1)
1914 printk(KERN_DEBUG "%s: Shutting down ethercard, "
1915 "status was %4.4x.\n",
1916 dev->name, ioread16(ioaddr + ChipCmd));
1917
1918 /* Switch to loopback mode to avoid hardware races. */
1919 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1920
1921 /* Disable interrupts by clearing the interrupt mask. */
1922 iowrite16(0x0000, ioaddr + IntrEnable);
1923
1924 /* Stop the chip's Tx and Rx processes. */
1925 iowrite16(CmdStop, ioaddr + ChipCmd);
1926
1927 spin_unlock_irq(&rp->lock);
1928
1929 free_irq(rp->pdev->irq, dev);
1930
1931 flush_scheduled_work();
1932
1933 free_rbufs(dev);
1934 free_tbufs(dev);
1935 free_ring(dev);
1936
1937 return 0;
1938 }
1939
1940
1941 static void __devexit rhine_remove_one(struct pci_dev *pdev)
1942 {
1943 struct net_device *dev = pci_get_drvdata(pdev);
1944 struct rhine_private *rp = netdev_priv(dev);
1945
1946 unregister_netdev(dev);
1947
1948 pci_iounmap(pdev, rp->base);
1949 pci_release_regions(pdev);
1950
1951 free_netdev(dev);
1952 pci_disable_device(pdev);
1953 pci_set_drvdata(pdev, NULL);
1954 }
1955
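/*
 * Arm the requested Wake-on-LAN events and put the chip into power state
 * D3. Used both at shutdown and, via rhine_suspend(), at suspend time.
 */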
1956 static void rhine_shutdown (struct pci_dev *pdev)
1957 {
1958 struct net_device *dev = pci_get_drvdata(pdev);
1959 struct rhine_private *rp = netdev_priv(dev);
1960 void __iomem *ioaddr = rp->base;
1961
1962 if (!(rp->quirks & rqWOL))
1963 return; /* Nothing to do for non-WOL adapters */
1964
1965 rhine_power_init(dev);
1966
1967 /* Make sure we use pattern 0, 1 and not 4, 5 */
1968 if (rp->quirks & rq6patterns)
1969 iowrite8(0x04, ioaddr + 0xA7);
1970
1971 if (rp->wolopts & WAKE_MAGIC) {
1972 iowrite8(WOLmagic, ioaddr + WOLcrSet);
1973 /*
1974 * Turn EEPROM-controlled wake-up back on -- some hardware may
1975 * not cooperate otherwise.
1976 */
1977 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
1978 }
1979
1980 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
1981 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
1982
1983 if (rp->wolopts & WAKE_PHY)
1984 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
1985
1986 if (rp->wolopts & WAKE_UCAST)
1987 iowrite8(WOLucast, ioaddr + WOLcrSet);
1988
1989 if (rp->wolopts) {
1990 /* Enable legacy WOL (for old motherboards) */
1991 iowrite8(0x01, ioaddr + PwcfgSet);
1992 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
1993 }
1994
1995 /* Hit power state D3 (sleep) */
1996 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
1997
1998 /* TODO: Check use of pci_enable_wake() */
1999
2000 }
2001
2002 #ifdef CONFIG_PM
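/*
 * Suspend: detach the interface, save PCI state, arm WOL and enter D3
 * through rhine_shutdown(), then release the interrupt line.
 */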
2003 static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
2004 {
2005 struct net_device *dev = pci_get_drvdata(pdev);
2006 struct rhine_private *rp = netdev_priv(dev);
2007 unsigned long flags;
2008
2009 if (!netif_running(dev))
2010 return 0;
2011
2012 netif_device_detach(dev);
2013 pci_save_state(pdev);
2014
2015 spin_lock_irqsave(&rp->lock, flags);
2016 rhine_shutdown(pdev);
2017 spin_unlock_irqrestore(&rp->lock, flags);
2018
2019 free_irq(dev->irq, dev);
2020 return 0;
2021 }
2022
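/*
 * Resume: re-request the interrupt, return the chip to D0, restore PCI
 * state, rebuild the Rx/Tx rings and reinitialize the registers before
 * reattaching the interface.
 */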
2023 static int rhine_resume(struct pci_dev *pdev)
2024 {
2025 struct net_device *dev = pci_get_drvdata(pdev);
2026 struct rhine_private *rp = netdev_priv(dev);
2027 unsigned long flags;
2028 int ret;
2029
2030 if (!netif_running(dev))
2031 return 0;
2032
2033 if (request_irq(dev->irq, rhine_interrupt, SA_SHIRQ, dev->name, dev))
2034 printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
2035
2036 ret = pci_set_power_state(pdev, PCI_D0);
2037 if (debug > 1)
2038 printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
2039 dev->name, ret ? "failed" : "succeeded", ret);
2040
2041 pci_restore_state(pdev);
2042
2043 spin_lock_irqsave(&rp->lock, flags);
2044 #ifdef USE_MMIO
2045 enable_mmio(rp->pioaddr, rp->quirks);
2046 #endif
2047 rhine_power_init(dev);
2048 free_tbufs(dev);
2049 free_rbufs(dev);
2050 alloc_tbufs(dev);
2051 alloc_rbufs(dev);
2052 init_registers(dev);
2053 spin_unlock_irqrestore(&rp->lock, flags);
2054
2055 netif_device_attach(dev);
2056
2057 return 0;
2058 }
2059 #endif /* CONFIG_PM */
2060
2061 static struct pci_driver rhine_driver = {
2062 .name = DRV_NAME,
2063 .id_table = rhine_pci_tbl,
2064 .probe = rhine_init_one,
2065 .remove = __devexit_p(rhine_remove_one),
2066 #ifdef CONFIG_PM
2067 .suspend = rhine_suspend,
2068 .resume = rhine_resume,
2069 #endif /* CONFIG_PM */
2070 .shutdown = rhine_shutdown,
2071 };
2072
2073
2074 static int __init rhine_init(void)
2075 {
2076 	/* When built as a module, print the version banner whether or not any devices are found during probe. */
2077 #ifdef MODULE
2078 printk(version);
2079 #endif
2080 return pci_module_init(&rhine_driver);
2081 }
2082
2083
2084 static void __exit rhine_cleanup(void)
2085 {
2086 pci_unregister_driver(&rhine_driver);
2087 }
2088
2089
2090 module_init(rhine_init);
2091 module_exit(rhine_cleanup);