Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */ |
2 | /* | |
3 | Written 1998-2001 by Donald Becker. | |
4 | ||
5 | Current Maintainer: Roger Luethi <rl@hellgate.ch> | |
6 | ||
7 | This software may be used and distributed according to the terms of | |
8 | the GNU General Public License (GPL), incorporated herein by reference. | |
9 | Drivers based on or derived from this code fall under the GPL and must | |
10 | retain the authorship, copyright and license notice. This file is not | |
11 | a complete program and may only be used when the entire operating | |
12 | system is licensed under the GPL. | |
13 | ||
14 | This driver is designed for the VIA VT86C100A Rhine-I. | |
15 | It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM | |
16 | and management NIC 6105M). | |
17 | ||
18 | The author may be reached as becker@scyld.com, or C/O | |
19 | Scyld Computing Corporation | |
20 | 410 Severn Ave., Suite 210 | |
21 | Annapolis MD 21403 | |
22 | ||
23 | ||
24 | This driver contains some changes from the original Donald Becker | |
25 | version. He may or may not be interested in bug reports on this | |
26 | code. You can find his versions at: | |
27 | http://www.scyld.com/network/via-rhine.html | |
03a8c661 | 28 | [link no longer provides useful info -jgarzik] |
1da177e4 LT |
29 | |
30 | */ | |
31 | ||
32 | #define DRV_NAME "via-rhine" | |
e84df485 RL |
33 | #define DRV_VERSION "1.4.3" |
34 | #define DRV_RELDATE "2007-03-06" | |
1da177e4 LT |
35 | |
36 | ||
37 | /* A few user-configurable values. | |
38 | These may be modified when a driver module is loaded. */ | |
39 | ||
40 | static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ | |
41 | static int max_interrupt_work = 20; | |
42 | ||
43 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. | |
44 | Setting to > 1518 effectively disables this feature. */ | |
8e95a202 JP |
45 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \ |
46 | defined(CONFIG_SPARC) || defined(__ia64__) || \ | |
47 | defined(__sh__) || defined(__mips__) | |
b47157f0 DM |
48 | static int rx_copybreak = 1518; |
49 | #else | |
1da177e4 | 50 | static int rx_copybreak; |
b47157f0 | 51 | #endif |
1da177e4 | 52 | |
b933b4d9 RL |
53 | /* Work-around for broken BIOSes: they are unable to get the chip back out of |
54 | power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */ | |
55 | static int avoid_D3; | |
56 | ||
1da177e4 LT |
57 | /* |
58 | * In case you are looking for 'options[]' or 'full_duplex[]', they | |
59 | * are gone. Use ethtool(8) instead. | |
60 | */ | |
61 | ||
62 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). | |
63 | The Rhine has a 64 element 8390-like hash table. */ | |
64 | static const int multicast_filter_limit = 32; | |
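The "8390-like" hash table mentioned above is a 64-bit filter indexed by a hash of the destination address; in NICs of this generation the index is commonly the top six bits of the big-endian Ethernet CRC-32. The standalone sketch below illustrates that scheme under this assumption only — it is not the driver's code (the authoritative logic lives in rhine_set_rx_mode(), of which only the declaration appears in this excerpt), and the names `ether_crc_be`, `mc_filter` and `mcast` are invented for the example.

```c
/*
 * Illustration of an "8390-like" 64-bit multicast hash filter.
 * Assumption: the filter bit is selected by the top 6 bits of the
 * big-endian Ethernet CRC-32 of the address, as is common for such
 * filters; the driver's real selection logic is in rhine_set_rx_mode().
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t ether_crc_be(int length, const unsigned char *data)
{
	uint32_t crc = 0xffffffff;

	while (--length >= 0) {
		unsigned char octet = *data++;
		int bit;

		/* Ethernet transmits each octet LSB first. */
		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ (octet & 1)) & 1) ? 0x04c11db7 : 0);
	}
	return crc;
}

int main(void)
{
	unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t mc_filter[2] = { 0, 0 };	/* models MulticastFilter0/1 */
	int bit_nr = ether_crc_be(6, mcast) >> 26;	/* 0..63 */

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	printf("hash bit %d -> filter word %d, mask 0x%08x\n",
	       bit_nr, bit_nr >> 5, (unsigned)mc_filter[bit_nr >> 5]);
	return 0;
}
```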
65 | ||
66 | ||
67 | /* Operational parameters that are set at compile time. */ | |
68 | ||
69 | /* Keep the ring sizes a power of two for compile efficiency. | |
70 | The compiler will convert <unsigned>'%'<2^N> into a bit mask. | |
71 | Making the Tx ring too large decreases the effectiveness of channel | |
72 | bonding and packet priority. | |
73 | There are no ill effects from too-large receive rings. */ | |
74 | #define TX_RING_SIZE 16 | |
75 | #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ | |
633949a1 | 76 | #define RX_RING_SIZE 64 |
1da177e4 LT |
77 | |
78 | /* Operational parameters that usually are not changed. */ | |
79 | ||
80 | /* Time in jiffies before concluding the transmitter is hung. */ | |
81 | #define TX_TIMEOUT (2*HZ) | |
82 | ||
83 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ | |
84 | ||
85 | #include <linux/module.h> | |
86 | #include <linux/moduleparam.h> | |
87 | #include <linux/kernel.h> | |
88 | #include <linux/string.h> | |
89 | #include <linux/timer.h> | |
90 | #include <linux/errno.h> | |
91 | #include <linux/ioport.h> | |
92 | #include <linux/slab.h> | |
93 | #include <linux/interrupt.h> | |
94 | #include <linux/pci.h> | |
1e7f0bd8 | 95 | #include <linux/dma-mapping.h> |
1da177e4 LT |
96 | #include <linux/netdevice.h> |
97 | #include <linux/etherdevice.h> | |
98 | #include <linux/skbuff.h> | |
99 | #include <linux/init.h> | |
100 | #include <linux/delay.h> | |
101 | #include <linux/mii.h> | |
102 | #include <linux/ethtool.h> | |
103 | #include <linux/crc32.h> | |
104 | #include <linux/bitops.h> | |
c0d7a021 | 105 | #include <linux/workqueue.h> |
1da177e4 LT |
106 | #include <asm/processor.h> /* Processor type for cache alignment. */ |
107 | #include <asm/io.h> | |
108 | #include <asm/irq.h> | |
109 | #include <asm/uaccess.h> | |
e84df485 | 110 | #include <linux/dmi.h> |
1da177e4 LT |
111 | |
112 | /* These identify the driver base version and may not be removed. */ | |
c8de1fce SH |
113 | static const char version[] __devinitconst = |
114 | KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE | |
115 | " Written by Donald Becker\n"; | |
1da177e4 LT |
116 | |
117 | /* This driver was written to use PCI memory space. Some early versions | |
118 | of the Rhine may only work correctly with I/O space accesses. */ | |
119 | #ifdef CONFIG_VIA_RHINE_MMIO | |
120 | #define USE_MMIO | |
121 | #else | |
122 | #endif | |
123 | ||
124 | MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); | |
125 | MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver"); | |
126 | MODULE_LICENSE("GPL"); | |
127 | ||
128 | module_param(max_interrupt_work, int, 0); | |
129 | module_param(debug, int, 0); | |
130 | module_param(rx_copybreak, int, 0); | |
b933b4d9 | 131 | module_param(avoid_D3, bool, 0); |
1da177e4 LT |
132 | MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt"); |
133 | MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)"); | |
134 | MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); | |
b933b4d9 | 135 | MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)"); |
1da177e4 LT |
136 | |
137 | /* | |
138 | Theory of Operation | |
139 | ||
140 | I. Board Compatibility | |
141 | ||
142 | This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet | |
143 | controller; it also supports the Rhine-II (6102) and Rhine-III (6105) chips. | |
144 | ||
145 | II. Board-specific settings | |
146 | ||
147 | Boards with this chip are functional only in a bus-master PCI slot. | |
148 | ||
149 | Many operational settings are loaded from the EEPROM to the Config word at | |
150 | offset 0x78. For most of these settings, this driver assumes that they are | |
151 | correct. | |
152 | If this driver is compiled to use PCI memory space operations the EEPROM | |
153 | must be configured to enable memory ops. | |
154 | ||
155 | III. Driver operation | |
156 | ||
157 | IIIa. Ring buffers | |
158 | ||
159 | This driver uses two statically allocated fixed-size descriptor lists | |
160 | formed into rings by a branch from the final descriptor to the beginning of | |
161 | the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. | |
162 | ||
163 | IIIb/c. Transmit/Receive Structure | |
164 | ||
165 | This driver attempts to use a zero-copy receive and transmit scheme. | |
166 | ||
167 | Alas, all data buffers are required to start on a 32 bit boundary, so | |
168 | the driver must often copy transmit packets into bounce buffers. | |
169 | ||
170 | The driver allocates full frame size skbuffs for the Rx ring buffers at | |
171 | open() time and passes the skb->data field to the chip as receive data | |
172 | buffers. When an incoming frame is less than RX_COPYBREAK bytes long, | |
173 | a fresh skbuff is allocated and the frame is copied to the new skbuff. | |
174 | When the incoming frame is larger, the skbuff is passed directly up the | |
175 | protocol stack. Buffers consumed this way are replaced by newly allocated | |
176 | skbuffs in the last phase of rhine_rx(). | |
177 | ||
178 | The RX_COPYBREAK value is chosen to trade off the memory wasted by | |
179 | using a full-sized skbuff for small frames vs. the copying costs of larger | |
180 | frames. New boards are typically used in generously configured machines | |
181 | and the underfilled buffers have negligible impact compared to the benefit of | |
182 | a single allocation size, so the default value of zero results in never | |
183 | copying packets. When copying is done, the cost is usually mitigated by using | |
184 | a combined copy/checksum routine. Copying also preloads the cache, which is | |
185 | most useful with small frames. | |
186 | ||
187 | Since the VIA chips are only able to transfer data to buffers on 32 bit | |
188 | boundaries, the IP header at offset 14 in an ethernet frame isn't | |
189 | longword aligned for further processing. Copying these unaligned buffers | |
190 | has the beneficial effect of 16-byte aligning the IP header. | |
191 | ||
192 | IIId. Synchronization | |
193 | ||
194 | The driver runs as two independent, single-threaded flows of control. One | |
195 | is the send-packet routine, which enforces single-threaded use by the | |
b74ca3a8 WC |
196 | netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler, |
197 | which is single threaded by the hardware and interrupt handling software. | |
1da177e4 LT |
198 | |
199 | The send packet thread has partial control over the Tx ring. It locks the | |
b74ca3a8 WC |
200 | netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in |
201 | the ring is not available it stops the transmit queue by | |
202 | calling netif_stop_queue. | |
1da177e4 LT |
203 | |
204 | The interrupt handler has exclusive control over the Rx ring and records stats | |
205 | from the Tx ring. After reaping the stats, it marks the Tx queue entry as | |
206 | empty by incrementing the dirty_tx mark. If at least half of the entries in | |
207 | the Tx ring are available, the transmit queue is woken up if it was stopped. | |
208 | ||
209 | IV. Notes | |
210 | ||
211 | IVb. References | |
212 | ||
213 | Preliminary VT86C100A manual from http://www.via.com.tw/ | |
214 | http://www.scyld.com/expert/100mbps.html | |
215 | http://www.scyld.com/expert/NWay.html | |
216 | ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf | |
217 | ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF | |
218 | ||
219 | ||
220 | IVc. Errata | |
221 | ||
222 | The VT86C100A manual is not a reliable source of information. | |
223 | The 3043 chip does not handle unaligned transmit or receive buffers, resulting | |
224 | in significant performance degradation for bounce buffer copies on transmit | |
225 | and unaligned IP headers on receive. | |
226 | The chip does not pad to minimum transmit length. | |
227 | ||
228 | */ | |
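To make the cur/dirty descriptor bookkeeping described in sections IIIa and IIId concrete, here is a minimal standalone sketch. The names are invented for the example and it is not driver code — the real bookkeeping is in rhine_start_tx(), rhine_tx() and rhine_rx() further down — but it shows the two ideas the comment relies on: the indices only ever increase (their difference is the number of descriptors in flight), and with a power-of-two ring size the '%' reduces to a cheap bit mask.

```c
/*
 * Sketch of producer/consumer ring indices as used by this driver:
 * cur_* counts descriptors ever queued, dirty_* counts descriptors
 * already reclaimed; both wrap naturally as unsigned ints.
 */
#include <stdio.h>

#define DEMO_RING_SIZE 16		/* power of two, like TX_RING_SIZE */

static unsigned int demo_cur, demo_dirty;

static unsigned int demo_in_flight(void)
{
	return demo_cur - demo_dirty;	/* correct even across wrap-around */
}

int main(void)
{
	demo_cur = 18;		/* 18 descriptors queued so far ... */
	demo_dirty = 13;	/* ... 13 of them already completed */

	printf("next free slot:      %u\n", demo_cur % DEMO_RING_SIZE);
	printf("descriptors in use:  %u\n", demo_in_flight());
	return 0;
}
```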
229 | ||
230 | ||
231 | /* This table drives the PCI probe routines. It's mostly boilerplate in all | |
232 | of the drivers, and will likely be provided by some future kernel. | |
233 | Note the matching code -- the first table entry matches all 56** cards but | |
234 | the second matches only the 1234 card. | |
235 | */ | |
236 | ||
237 | enum rhine_revs { | |
238 | VT86C100A = 0x00, | |
239 | VTunknown0 = 0x20, | |
240 | VT6102 = 0x40, | |
241 | VT8231 = 0x50, /* Integrated MAC */ | |
242 | VT8233 = 0x60, /* Integrated MAC */ | |
243 | VT8235 = 0x74, /* Integrated MAC */ | |
244 | VT8237 = 0x78, /* Integrated MAC */ | |
245 | VTunknown1 = 0x7C, | |
246 | VT6105 = 0x80, | |
247 | VT6105_B0 = 0x83, | |
248 | VT6105L = 0x8A, | |
249 | VT6107 = 0x8C, | |
250 | VTunknown2 = 0x8E, | |
251 | VT6105M = 0x90, /* Management adapter */ | |
252 | }; | |
253 | ||
254 | enum rhine_quirks { | |
255 | rqWOL = 0x0001, /* Wake-On-LAN support */ | |
256 | rqForceReset = 0x0002, | |
257 | rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */ | |
258 | rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */ | |
259 | rqRhineI = 0x0100, /* See comment below */ | |
260 | }; | |
261 | /* | |
262 | * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable | |
263 | * MMIO as well as for the collision counter and the Tx FIFO underflow | |
264 | indicator. In addition, Tx and Rx buffers need to be 4-byte aligned. | |
265 | */ | |
266 | ||
267 | /* Beware of PCI posted writes */ | |
268 | #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0) | |
269 | ||
a3aa1884 | 270 | static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = { |
46009c8b JG |
271 | { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */ |
272 | { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */ | |
273 | { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */ | |
274 | { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */ | |
1da177e4 LT |
275 | { } /* terminate list */ |
276 | }; | |
277 | MODULE_DEVICE_TABLE(pci, rhine_pci_tbl); | |
278 | ||
279 | ||
280 | /* Offsets to the device registers. */ | |
281 | enum register_offsets { | |
282 | StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08, | |
283 | ChipCmd1=0x09, | |
284 | IntrStatus=0x0C, IntrEnable=0x0E, | |
285 | MulticastFilter0=0x10, MulticastFilter1=0x14, | |
286 | RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54, | |
287 | MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, | |
288 | MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74, | |
289 | ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B, | |
290 | RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81, | |
291 | StickyHW=0x83, IntrStatus2=0x84, | |
292 | WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4, | |
293 | WOLcrClr1=0xA6, WOLcgClr=0xA7, | |
294 | PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD, | |
295 | }; | |
296 | ||
297 | /* Bits in ConfigD */ | |
298 | enum backoff_bits { | |
299 | BackOptional=0x01, BackModify=0x02, | |
300 | BackCaptureEffect=0x04, BackRandom=0x08 | |
301 | }; | |
302 | ||
303 | #ifdef USE_MMIO | |
304 | /* Registers we check to verify that MMIO and PIO reads match. */ | |
305 | static const int mmio_verify_registers[] = { | |
306 | RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD, | |
307 | 0 | |
308 | }; | |
309 | #endif | |
310 | ||
311 | /* Bits in the interrupt status/mask registers. */ | |
312 | enum intr_status_bits { | |
313 | IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020, | |
314 | IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210, | |
315 | IntrPCIErr=0x0040, | |
316 | IntrStatsMax=0x0080, IntrRxEarly=0x0100, | |
317 | IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000, | |
318 | IntrTxAborted=0x2000, IntrLinkChange=0x4000, | |
319 | IntrRxWakeUp=0x8000, | |
320 | IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260, | |
321 | IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */ | |
322 | IntrTxErrSummary=0x082218, | |
323 | }; | |
324 | ||
325 | /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */ | |
326 | enum wol_bits { | |
327 | WOLucast = 0x10, | |
328 | WOLmagic = 0x20, | |
329 | WOLbmcast = 0x30, | |
330 | WOLlnkon = 0x40, | |
331 | WOLlnkoff = 0x80, | |
332 | }; | |
333 | ||
334 | /* The Rx and Tx buffer descriptors. */ | |
335 | struct rx_desc { | |
53c03f5c AV |
336 | __le32 rx_status; |
337 | __le32 desc_length; /* Chain flag, Buffer/frame length */ | |
338 | __le32 addr; | |
339 | __le32 next_desc; | |
1da177e4 LT |
340 | }; |
341 | struct tx_desc { | |
53c03f5c AV |
342 | __le32 tx_status; |
343 | __le32 desc_length; /* Chain flag, Tx Config, Frame length */ | |
344 | __le32 addr; | |
345 | __le32 next_desc; | |
1da177e4 LT |
346 | }; |
347 | ||
348 | /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */ | |
349 | #define TXDESC 0x00e08000 | |
350 | ||
351 | enum rx_status_bits { | |
352 | RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F | |
353 | }; | |
354 | ||
355 | /* Bits in *_desc.*_status */ | |
356 | enum desc_status_bits { | |
357 | DescOwn=0x80000000 | |
358 | }; | |
359 | ||
360 | /* Bits in ChipCmd. */ | |
361 | enum chip_cmd_bits { | |
362 | CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08, | |
363 | CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40, | |
364 | Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04, | |
365 | Cmd1NoTxPoll=0x08, Cmd1Reset=0x80, | |
366 | }; | |
367 | ||
368 | struct rhine_private { | |
369 | /* Descriptor rings */ | |
370 | struct rx_desc *rx_ring; | |
371 | struct tx_desc *tx_ring; | |
372 | dma_addr_t rx_ring_dma; | |
373 | dma_addr_t tx_ring_dma; | |
374 | ||
375 | /* The addresses of receive-in-place skbuffs. */ | |
376 | struct sk_buff *rx_skbuff[RX_RING_SIZE]; | |
377 | dma_addr_t rx_skbuff_dma[RX_RING_SIZE]; | |
378 | ||
379 | /* The saved address of a sent-in-place packet/buffer, for later free(). */ | |
380 | struct sk_buff *tx_skbuff[TX_RING_SIZE]; | |
381 | dma_addr_t tx_skbuff_dma[TX_RING_SIZE]; | |
382 | ||
4be5de25 | 383 | /* Tx bounce buffers (Rhine-I only) */ |
1da177e4 LT |
384 | unsigned char *tx_buf[TX_RING_SIZE]; |
385 | unsigned char *tx_bufs; | |
386 | dma_addr_t tx_bufs_dma; | |
387 | ||
388 | struct pci_dev *pdev; | |
389 | long pioaddr; | |
bea3348e SH |
390 | struct net_device *dev; |
391 | struct napi_struct napi; | |
1da177e4 | 392 | spinlock_t lock; |
c0d7a021 | 393 | struct work_struct reset_task; |
1da177e4 LT |
394 | |
395 | /* Frequently used values: keep some adjacent for cache effect. */ | |
396 | u32 quirks; | |
397 | struct rx_desc *rx_head_desc; | |
398 | unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ | |
399 | unsigned int cur_tx, dirty_tx; | |
400 | unsigned int rx_buf_sz; /* Based on MTU+slack. */ | |
401 | u8 wolopts; | |
402 | ||
403 | u8 tx_thresh, rx_thresh; | |
404 | ||
405 | struct mii_if_info mii_if; | |
406 | void __iomem *base; | |
407 | }; | |
408 | ||
409 | static int mdio_read(struct net_device *dev, int phy_id, int location); | |
410 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value); | |
411 | static int rhine_open(struct net_device *dev); | |
c0d7a021 | 412 | static void rhine_reset_task(struct work_struct *work); |
1da177e4 | 413 | static void rhine_tx_timeout(struct net_device *dev); |
61357325 SH |
414 | static netdev_tx_t rhine_start_tx(struct sk_buff *skb, |
415 | struct net_device *dev); | |
7d12e780 | 416 | static irqreturn_t rhine_interrupt(int irq, void *dev_instance); |
1da177e4 | 417 | static void rhine_tx(struct net_device *dev); |
633949a1 | 418 | static int rhine_rx(struct net_device *dev, int limit); |
1da177e4 LT |
419 | static void rhine_error(struct net_device *dev, int intr_status); |
420 | static void rhine_set_rx_mode(struct net_device *dev); | |
421 | static struct net_device_stats *rhine_get_stats(struct net_device *dev); | |
422 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | |
7282d491 | 423 | static const struct ethtool_ops netdev_ethtool_ops; |
1da177e4 | 424 | static int rhine_close(struct net_device *dev); |
d18c3db5 | 425 | static void rhine_shutdown (struct pci_dev *pdev); |
1da177e4 LT |
426 | |
427 | #define RHINE_WAIT_FOR(condition) do { \ | |
428 | int i=1024; \ | |
429 | while (!(condition) && --i) \ | |
430 | ; \ | |
431 | if (debug > 1 && i < 512) \ | |
432 | printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \ | |
433 | DRV_NAME, 1024-i, __func__, __LINE__); \ | |
434 | } while(0) | |
435 | ||
436 | static inline u32 get_intr_status(struct net_device *dev) | |
437 | { | |
438 | struct rhine_private *rp = netdev_priv(dev); | |
439 | void __iomem *ioaddr = rp->base; | |
440 | u32 intr_status; | |
441 | ||
442 | intr_status = ioread16(ioaddr + IntrStatus); | |
443 | /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */ | |
444 | if (rp->quirks & rqStatusWBRace) | |
445 | intr_status |= ioread8(ioaddr + IntrStatus2) << 16; | |
446 | return intr_status; | |
447 | } | |
448 | ||
449 | /* | |
450 | * Get power related registers into sane state. | |
451 | * Notify user about past WOL event. | |
452 | */ | |
453 | static void rhine_power_init(struct net_device *dev) | |
454 | { | |
455 | struct rhine_private *rp = netdev_priv(dev); | |
456 | void __iomem *ioaddr = rp->base; | |
457 | u16 wolstat; | |
458 | ||
459 | if (rp->quirks & rqWOL) { | |
460 | /* Make sure chip is in power state D0 */ | |
461 | iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW); | |
462 | ||
463 | /* Disable "force PME-enable" */ | |
464 | iowrite8(0x80, ioaddr + WOLcgClr); | |
465 | ||
466 | /* Clear power-event config bits (WOL) */ | |
467 | iowrite8(0xFF, ioaddr + WOLcrClr); | |
468 | /* More recent cards can manage two additional patterns */ | |
469 | if (rp->quirks & rq6patterns) | |
470 | iowrite8(0x03, ioaddr + WOLcrClr1); | |
471 | ||
472 | /* Save power-event status bits */ | |
473 | wolstat = ioread8(ioaddr + PwrcsrSet); | |
474 | if (rp->quirks & rq6patterns) | |
475 | wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8; | |
476 | ||
477 | /* Clear power-event status bits */ | |
478 | iowrite8(0xFF, ioaddr + PwrcsrClr); | |
479 | if (rp->quirks & rq6patterns) | |
480 | iowrite8(0x03, ioaddr + PwrcsrClr1); | |
481 | ||
482 | if (wolstat) { | |
483 | char *reason; | |
484 | switch (wolstat) { | |
485 | case WOLmagic: | |
486 | reason = "Magic packet"; | |
487 | break; | |
488 | case WOLlnkon: | |
489 | reason = "Link went up"; | |
490 | break; | |
491 | case WOLlnkoff: | |
492 | reason = "Link went down"; | |
493 | break; | |
494 | case WOLucast: | |
495 | reason = "Unicast packet"; | |
496 | break; | |
497 | case WOLbmcast: | |
498 | reason = "Multicast/broadcast packet"; | |
499 | break; | |
500 | default: | |
501 | reason = "Unknown"; | |
502 | } | |
503 | printk(KERN_INFO "%s: Woke system up. Reason: %s.\n", | |
504 | DRV_NAME, reason); | |
505 | } | |
506 | } | |
507 | } | |
508 | ||
509 | static void rhine_chip_reset(struct net_device *dev) | |
510 | { | |
511 | struct rhine_private *rp = netdev_priv(dev); | |
512 | void __iomem *ioaddr = rp->base; | |
513 | ||
514 | iowrite8(Cmd1Reset, ioaddr + ChipCmd1); | |
515 | IOSYNC; | |
516 | ||
517 | if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) { | |
518 | printk(KERN_INFO "%s: Reset not complete yet. " | |
519 | "Trying harder.\n", DRV_NAME); | |
520 | ||
521 | /* Force reset */ | |
522 | if (rp->quirks & rqForceReset) | |
523 | iowrite8(0x40, ioaddr + MiscCmd); | |
524 | ||
525 | /* Reset can take somewhat longer (rare) */ | |
526 | RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset)); | |
527 | } | |
528 | ||
529 | if (debug > 1) | |
530 | printk(KERN_INFO "%s: Reset %s.\n", dev->name, | |
531 | (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ? | |
532 | "failed" : "succeeded"); | |
533 | } | |
534 | ||
535 | #ifdef USE_MMIO | |
536 | static void enable_mmio(long pioaddr, u32 quirks) | |
537 | { | |
538 | int n; | |
539 | if (quirks & rqRhineI) { | |
540 | /* More recent docs say that this bit is reserved ... */ | |
541 | n = inb(pioaddr + ConfigA) | 0x20; | |
542 | outb(n, pioaddr + ConfigA); | |
543 | } else { | |
544 | n = inb(pioaddr + ConfigD) | 0x80; | |
545 | outb(n, pioaddr + ConfigD); | |
546 | } | |
547 | } | |
548 | #endif | |
549 | ||
550 | /* | |
551 | * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM | |
552 | * (plus 0x6C for Rhine-I/II) | |
553 | */ | |
554 | static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev) | |
555 | { | |
556 | struct rhine_private *rp = netdev_priv(dev); | |
557 | void __iomem *ioaddr = rp->base; | |
558 | ||
559 | outb(0x20, pioaddr + MACRegEEcsr); | |
560 | RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20)); | |
561 | ||
562 | #ifdef USE_MMIO | |
563 | /* | |
564 | * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable | |
565 | * MMIO. If reloading EEPROM was done first this could be avoided, but | |
566 | * it is not known if that still works with the "win98-reboot" problem. | |
567 | */ | |
568 | enable_mmio(pioaddr, rp->quirks); | |
569 | #endif | |
570 | ||
571 | /* Turn off EEPROM-controlled wake-up (magic packet) */ | |
572 | if (rp->quirks & rqWOL) | |
573 | iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA); | |
574 | ||
575 | } | |
576 | ||
577 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
578 | static void rhine_poll(struct net_device *dev) | |
579 | { | |
580 | disable_irq(dev->irq); | |
7d12e780 | 581 | rhine_interrupt(dev->irq, (void *)dev); |
1da177e4 LT |
582 | enable_irq(dev->irq); |
583 | } | |
584 | #endif | |
585 | ||
bea3348e | 586 | static int rhine_napipoll(struct napi_struct *napi, int budget) |
633949a1 | 587 | { |
bea3348e SH |
588 | struct rhine_private *rp = container_of(napi, struct rhine_private, napi); |
589 | struct net_device *dev = rp->dev; | |
633949a1 | 590 | void __iomem *ioaddr = rp->base; |
bea3348e | 591 | int work_done; |
633949a1 | 592 | |
bea3348e | 593 | work_done = rhine_rx(dev, budget); |
633949a1 | 594 | |
bea3348e | 595 | if (work_done < budget) { |
288379f0 | 596 | napi_complete(napi); |
633949a1 RL |
597 | |
598 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | | |
599 | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | | |
600 | IntrTxDone | IntrTxError | IntrTxUnderrun | | |
601 | IntrPCIErr | IntrStatsMax | IntrLinkChange, | |
602 | ioaddr + IntrEnable); | |
633949a1 | 603 | } |
bea3348e | 604 | return work_done; |
633949a1 | 605 | } |
633949a1 | 606 | |
de4e7c88 | 607 | static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr) |
1da177e4 LT |
608 | { |
609 | struct rhine_private *rp = netdev_priv(dev); | |
610 | ||
611 | /* Reset the chip to erase previous misconfiguration. */ | |
612 | rhine_chip_reset(dev); | |
613 | ||
614 | /* Rhine-I needs extra time to recuperate before EEPROM reload */ | |
615 | if (rp->quirks & rqRhineI) | |
616 | msleep(5); | |
617 | ||
618 | /* Reload EEPROM controlled bytes cleared by soft reset */ | |
619 | rhine_reload_eeprom(pioaddr, dev); | |
620 | } | |
621 | ||
5d1d07d8 SH |
622 | static const struct net_device_ops rhine_netdev_ops = { |
623 | .ndo_open = rhine_open, | |
624 | .ndo_stop = rhine_close, | |
625 | .ndo_start_xmit = rhine_start_tx, | |
626 | .ndo_get_stats = rhine_get_stats, | |
627 | .ndo_set_multicast_list = rhine_set_rx_mode, | |
635ecaa7 | 628 | .ndo_change_mtu = eth_change_mtu, |
5d1d07d8 | 629 | .ndo_validate_addr = eth_validate_addr, |
fe96aaa1 | 630 | .ndo_set_mac_address = eth_mac_addr, |
5d1d07d8 SH |
631 | .ndo_do_ioctl = netdev_ioctl, |
632 | .ndo_tx_timeout = rhine_tx_timeout, | |
633 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
634 | .ndo_poll_controller = rhine_poll, | |
635 | #endif | |
636 | }; | |
637 | ||
1da177e4 LT |
638 | static int __devinit rhine_init_one(struct pci_dev *pdev, |
639 | const struct pci_device_id *ent) | |
640 | { | |
641 | struct net_device *dev; | |
642 | struct rhine_private *rp; | |
643 | int i, rc; | |
1da177e4 LT |
644 | u32 quirks; |
645 | long pioaddr; | |
646 | long memaddr; | |
647 | void __iomem *ioaddr; | |
648 | int io_size, phy_id; | |
649 | const char *name; | |
650 | #ifdef USE_MMIO | |
651 | int bar = 1; | |
652 | #else | |
653 | int bar = 0; | |
654 | #endif | |
655 | ||
656 | /* when built into the kernel, we only print version if device is found */ | |
657 | #ifndef MODULE | |
658 | static int printed_version; | |
659 | if (!printed_version++) | |
660 | printk(version); | |
661 | #endif | |
662 | ||
1da177e4 LT |
663 | io_size = 256; |
664 | phy_id = 0; | |
665 | quirks = 0; | |
666 | name = "Rhine"; | |
44c10138 | 667 | if (pdev->revision < VTunknown0) { |
1da177e4 LT |
668 | quirks = rqRhineI; |
669 | io_size = 128; | |
670 | } | |
44c10138 | 671 | else if (pdev->revision >= VT6102) { |
1da177e4 | 672 | quirks = rqWOL | rqForceReset; |
44c10138 | 673 | if (pdev->revision < VT6105) { |
1da177e4 LT |
674 | name = "Rhine II"; |
675 | quirks |= rqStatusWBRace; /* Rhine-II exclusive */ | |
676 | } | |
677 | else { | |
678 | phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */ | |
44c10138 | 679 | if (pdev->revision >= VT6105_B0) |
1da177e4 | 680 | quirks |= rq6patterns; |
44c10138 | 681 | if (pdev->revision < VT6105M) |
1da177e4 LT |
682 | name = "Rhine III"; |
683 | else | |
684 | name = "Rhine III (Management Adapter)"; | |
685 | } | |
686 | } | |
687 | ||
688 | rc = pci_enable_device(pdev); | |
689 | if (rc) | |
690 | goto err_out; | |
691 | ||
692 | /* this should always be supported */ | |
284901a9 | 693 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
1da177e4 LT |
694 | if (rc) { |
695 | printk(KERN_ERR "32-bit PCI DMA addresses not supported by " | |
696 | "the card!?\n"); | |
697 | goto err_out; | |
698 | } | |
699 | ||
700 | /* sanity check */ | |
701 | if ((pci_resource_len(pdev, 0) < io_size) || | |
702 | (pci_resource_len(pdev, 1) < io_size)) { | |
703 | rc = -EIO; | |
704 | printk(KERN_ERR "Insufficient PCI resources, aborting\n"); | |
705 | goto err_out; | |
706 | } | |
707 | ||
708 | pioaddr = pci_resource_start(pdev, 0); | |
709 | memaddr = pci_resource_start(pdev, 1); | |
710 | ||
711 | pci_set_master(pdev); | |
712 | ||
713 | dev = alloc_etherdev(sizeof(struct rhine_private)); | |
714 | if (!dev) { | |
715 | rc = -ENOMEM; | |
716 | printk(KERN_ERR "alloc_etherdev failed\n"); | |
717 | goto err_out; | |
718 | } | |
1da177e4 LT |
719 | SET_NETDEV_DEV(dev, &pdev->dev); |
720 | ||
721 | rp = netdev_priv(dev); | |
bea3348e | 722 | rp->dev = dev; |
1da177e4 LT |
723 | rp->quirks = quirks; |
724 | rp->pioaddr = pioaddr; | |
725 | rp->pdev = pdev; | |
726 | ||
727 | rc = pci_request_regions(pdev, DRV_NAME); | |
728 | if (rc) | |
729 | goto err_out_free_netdev; | |
730 | ||
731 | ioaddr = pci_iomap(pdev, bar, io_size); | |
732 | if (!ioaddr) { | |
733 | rc = -EIO; | |
734 | printk(KERN_ERR "ioremap failed for device %s, region 0x%X " | |
735 | "@ 0x%lX\n", pci_name(pdev), io_size, memaddr); | |
736 | goto err_out_free_res; | |
737 | } | |
738 | ||
739 | #ifdef USE_MMIO | |
740 | enable_mmio(pioaddr, quirks); | |
741 | ||
742 | /* Check that selected MMIO registers match the PIO ones */ | |
743 | i = 0; | |
744 | while (mmio_verify_registers[i]) { | |
745 | int reg = mmio_verify_registers[i++]; | |
746 | unsigned char a = inb(pioaddr+reg); | |
747 | unsigned char b = readb(ioaddr+reg); | |
748 | if (a != b) { | |
749 | rc = -EIO; | |
750 | printk(KERN_ERR "MMIO do not match PIO [%02x] " | |
751 | "(%02x != %02x)\n", reg, a, b); | |
752 | goto err_out_unmap; | |
753 | } | |
754 | } | |
755 | #endif /* USE_MMIO */ | |
756 | ||
757 | dev->base_addr = (unsigned long)ioaddr; | |
758 | rp->base = ioaddr; | |
759 | ||
760 | /* Get chip registers into a sane state */ | |
761 | rhine_power_init(dev); | |
762 | rhine_hw_init(dev, pioaddr); | |
763 | ||
764 | for (i = 0; i < 6; i++) | |
765 | dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i); | |
b81e8e1f | 766 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
1da177e4 | 767 | |
b81e8e1f | 768 | if (!is_valid_ether_addr(dev->perm_addr)) { |
1da177e4 LT |
769 | rc = -EIO; |
770 | printk(KERN_ERR "Invalid MAC address\n"); | |
771 | goto err_out_unmap; | |
772 | } | |
773 | ||
774 | /* For Rhine-I/II, phy_id is loaded from EEPROM */ | |
775 | if (!phy_id) | |
776 | phy_id = ioread8(ioaddr + 0x6C); | |
777 | ||
778 | dev->irq = pdev->irq; | |
779 | ||
780 | spin_lock_init(&rp->lock); | |
c0d7a021 JP |
781 | INIT_WORK(&rp->reset_task, rhine_reset_task); |
782 | ||
1da177e4 LT |
783 | rp->mii_if.dev = dev; |
784 | rp->mii_if.mdio_read = mdio_read; | |
785 | rp->mii_if.mdio_write = mdio_write; | |
786 | rp->mii_if.phy_id_mask = 0x1f; | |
787 | rp->mii_if.reg_num_mask = 0x1f; | |
788 | ||
789 | /* The chip-specific entries in the device structure. */ | |
5d1d07d8 SH |
790 | dev->netdev_ops = &rhine_netdev_ops; |
791 | dev->ethtool_ops = &netdev_ethtool_ops, | |
1da177e4 | 792 | dev->watchdog_timeo = TX_TIMEOUT; |
5d1d07d8 | 793 | |
bea3348e | 794 | netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); |
32b0f53e | 795 | |
1da177e4 LT |
796 | if (rp->quirks & rqRhineI) |
797 | dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; | |
798 | ||
799 | /* dev->name not defined before register_netdev()! */ | |
800 | rc = register_netdev(dev); | |
801 | if (rc) | |
802 | goto err_out_unmap; | |
803 | ||
e174961c | 804 | printk(KERN_INFO "%s: VIA %s at 0x%lx, %pM, IRQ %d.\n", |
1da177e4 LT |
805 | dev->name, name, |
806 | #ifdef USE_MMIO | |
0795af57 | 807 | memaddr, |
1da177e4 | 808 | #else |
0795af57 | 809 | (long)ioaddr, |
1da177e4 | 810 | #endif |
e174961c | 811 | dev->dev_addr, pdev->irq); |
1da177e4 LT |
812 | |
813 | pci_set_drvdata(pdev, dev); | |
814 | ||
815 | { | |
816 | u16 mii_cmd; | |
817 | int mii_status = mdio_read(dev, phy_id, 1); | |
818 | mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE; | |
819 | mdio_write(dev, phy_id, MII_BMCR, mii_cmd); | |
820 | if (mii_status != 0xffff && mii_status != 0x0000) { | |
821 | rp->mii_if.advertising = mdio_read(dev, phy_id, 4); | |
822 | printk(KERN_INFO "%s: MII PHY found at address " | |
823 | "%d, status 0x%4.4x advertising %4.4x " | |
824 | "Link %4.4x.\n", dev->name, phy_id, | |
825 | mii_status, rp->mii_if.advertising, | |
826 | mdio_read(dev, phy_id, 5)); | |
827 | ||
828 | /* set IFF_RUNNING */ | |
829 | if (mii_status & BMSR_LSTATUS) | |
830 | netif_carrier_on(dev); | |
831 | else | |
832 | netif_carrier_off(dev); | |
833 | ||
834 | } | |
835 | } | |
836 | rp->mii_if.phy_id = phy_id; | |
b933b4d9 RL |
837 | if (debug > 1 && avoid_D3) |
838 | printk(KERN_INFO "%s: No D3 power state at shutdown.\n", | |
839 | dev->name); | |
1da177e4 LT |
840 | |
841 | return 0; | |
842 | ||
843 | err_out_unmap: | |
844 | pci_iounmap(pdev, ioaddr); | |
845 | err_out_free_res: | |
846 | pci_release_regions(pdev); | |
847 | err_out_free_netdev: | |
848 | free_netdev(dev); | |
849 | err_out: | |
850 | return rc; | |
851 | } | |
852 | ||
853 | static int alloc_ring(struct net_device* dev) | |
854 | { | |
855 | struct rhine_private *rp = netdev_priv(dev); | |
856 | void *ring; | |
857 | dma_addr_t ring_dma; | |
858 | ||
859 | ring = pci_alloc_consistent(rp->pdev, | |
860 | RX_RING_SIZE * sizeof(struct rx_desc) + | |
861 | TX_RING_SIZE * sizeof(struct tx_desc), | |
862 | &ring_dma); | |
863 | if (!ring) { | |
864 | printk(KERN_ERR "Could not allocate DMA memory.\n"); | |
865 | return -ENOMEM; | |
866 | } | |
867 | if (rp->quirks & rqRhineI) { | |
868 | rp->tx_bufs = pci_alloc_consistent(rp->pdev, | |
869 | PKT_BUF_SZ * TX_RING_SIZE, | |
870 | &rp->tx_bufs_dma); | |
871 | if (rp->tx_bufs == NULL) { | |
872 | pci_free_consistent(rp->pdev, | |
873 | RX_RING_SIZE * sizeof(struct rx_desc) + | |
874 | TX_RING_SIZE * sizeof(struct tx_desc), | |
875 | ring, ring_dma); | |
876 | return -ENOMEM; | |
877 | } | |
878 | } | |
879 | ||
880 | rp->rx_ring = ring; | |
881 | rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc); | |
882 | rp->rx_ring_dma = ring_dma; | |
883 | rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc); | |
884 | ||
885 | return 0; | |
886 | } | |
887 | ||
888 | static void free_ring(struct net_device* dev) | |
889 | { | |
890 | struct rhine_private *rp = netdev_priv(dev); | |
891 | ||
892 | pci_free_consistent(rp->pdev, | |
893 | RX_RING_SIZE * sizeof(struct rx_desc) + | |
894 | TX_RING_SIZE * sizeof(struct tx_desc), | |
895 | rp->rx_ring, rp->rx_ring_dma); | |
896 | rp->tx_ring = NULL; | |
897 | ||
898 | if (rp->tx_bufs) | |
899 | pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE, | |
900 | rp->tx_bufs, rp->tx_bufs_dma); | |
901 | ||
902 | rp->tx_bufs = NULL; | |
903 | ||
904 | } | |
905 | ||
906 | static void alloc_rbufs(struct net_device *dev) | |
907 | { | |
908 | struct rhine_private *rp = netdev_priv(dev); | |
909 | dma_addr_t next; | |
910 | int i; | |
911 | ||
912 | rp->dirty_rx = rp->cur_rx = 0; | |
913 | ||
914 | rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); | |
915 | rp->rx_head_desc = &rp->rx_ring[0]; | |
916 | next = rp->rx_ring_dma; | |
917 | ||
918 | /* Init the ring entries */ | |
919 | for (i = 0; i < RX_RING_SIZE; i++) { | |
920 | rp->rx_ring[i].rx_status = 0; | |
921 | rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz); | |
922 | next += sizeof(struct rx_desc); | |
923 | rp->rx_ring[i].next_desc = cpu_to_le32(next); | |
924 | rp->rx_skbuff[i] = NULL; | |
925 | } | |
926 | /* Mark the last entry as wrapping the ring. */ | |
927 | rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma); | |
928 | ||
929 | /* Fill in the Rx buffers. Handle allocation failure gracefully. */ | |
930 | for (i = 0; i < RX_RING_SIZE; i++) { | |
b26b555a | 931 | struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz); |
1da177e4 LT |
932 | rp->rx_skbuff[i] = skb; |
933 | if (skb == NULL) | |
934 | break; | |
935 | skb->dev = dev; /* Mark as being used by this device. */ | |
936 | ||
937 | rp->rx_skbuff_dma[i] = | |
689be439 | 938 | pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz, |
1da177e4 LT |
939 | PCI_DMA_FROMDEVICE); |
940 | ||
941 | rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]); | |
942 | rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); | |
943 | } | |
944 | rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); | |
945 | } | |
946 | ||
947 | static void free_rbufs(struct net_device* dev) | |
948 | { | |
949 | struct rhine_private *rp = netdev_priv(dev); | |
950 | int i; | |
951 | ||
952 | /* Free all the skbuffs in the Rx queue. */ | |
953 | for (i = 0; i < RX_RING_SIZE; i++) { | |
954 | rp->rx_ring[i].rx_status = 0; | |
955 | rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ | |
956 | if (rp->rx_skbuff[i]) { | |
957 | pci_unmap_single(rp->pdev, | |
958 | rp->rx_skbuff_dma[i], | |
959 | rp->rx_buf_sz, PCI_DMA_FROMDEVICE); | |
960 | dev_kfree_skb(rp->rx_skbuff[i]); | |
961 | } | |
962 | rp->rx_skbuff[i] = NULL; | |
963 | } | |
964 | } | |
965 | ||
966 | static void alloc_tbufs(struct net_device* dev) | |
967 | { | |
968 | struct rhine_private *rp = netdev_priv(dev); | |
969 | dma_addr_t next; | |
970 | int i; | |
971 | ||
972 | rp->dirty_tx = rp->cur_tx = 0; | |
973 | next = rp->tx_ring_dma; | |
974 | for (i = 0; i < TX_RING_SIZE; i++) { | |
975 | rp->tx_skbuff[i] = NULL; | |
976 | rp->tx_ring[i].tx_status = 0; | |
977 | rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); | |
978 | next += sizeof(struct tx_desc); | |
979 | rp->tx_ring[i].next_desc = cpu_to_le32(next); | |
4be5de25 RL |
980 | if (rp->quirks & rqRhineI) |
981 | rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ]; | |
1da177e4 LT |
982 | } |
983 | rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); | |
984 | ||
985 | } | |
986 | ||
987 | static void free_tbufs(struct net_device* dev) | |
988 | { | |
989 | struct rhine_private *rp = netdev_priv(dev); | |
990 | int i; | |
991 | ||
992 | for (i = 0; i < TX_RING_SIZE; i++) { | |
993 | rp->tx_ring[i].tx_status = 0; | |
994 | rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); | |
995 | rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ | |
996 | if (rp->tx_skbuff[i]) { | |
997 | if (rp->tx_skbuff_dma[i]) { | |
998 | pci_unmap_single(rp->pdev, | |
999 | rp->tx_skbuff_dma[i], | |
1000 | rp->tx_skbuff[i]->len, | |
1001 | PCI_DMA_TODEVICE); | |
1002 | } | |
1003 | dev_kfree_skb(rp->tx_skbuff[i]); | |
1004 | } | |
1005 | rp->tx_skbuff[i] = NULL; | |
1006 | rp->tx_buf[i] = NULL; | |
1007 | } | |
1008 | } | |
1009 | ||
1010 | static void rhine_check_media(struct net_device *dev, unsigned int init_media) | |
1011 | { | |
1012 | struct rhine_private *rp = netdev_priv(dev); | |
1013 | void __iomem *ioaddr = rp->base; | |
1014 | ||
1015 | mii_check_media(&rp->mii_if, debug, init_media); | |
1016 | ||
1017 | if (rp->mii_if.full_duplex) | |
1018 | iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex, | |
1019 | ioaddr + ChipCmd1); | |
1020 | else | |
1021 | iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex, | |
1022 | ioaddr + ChipCmd1); | |
00b428c2 RL |
1023 | if (debug > 1) |
1024 | printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name, | |
1025 | rp->mii_if.force_media, netif_carrier_ok(dev)); | |
1026 | } | |
1027 | ||
1028 | /* Called after status of force_media possibly changed */ | |
0761be4f | 1029 | static void rhine_set_carrier(struct mii_if_info *mii) |
00b428c2 RL |
1030 | { |
1031 | if (mii->force_media) { | |
1032 | /* autoneg is off: Link is always assumed to be up */ | |
1033 | if (!netif_carrier_ok(mii->dev)) | |
1034 | netif_carrier_on(mii->dev); | |
1035 | } | |
1036 | else /* Let MII library update carrier status */ | |
1037 | rhine_check_media(mii->dev, 0); | |
1038 | if (debug > 1) | |
1039 | printk(KERN_INFO "%s: force_media %d, carrier %d\n", | |
1040 | mii->dev->name, mii->force_media, | |
1041 | netif_carrier_ok(mii->dev)); | |
1da177e4 LT |
1042 | } |
1043 | ||
1044 | static void init_registers(struct net_device *dev) | |
1045 | { | |
1046 | struct rhine_private *rp = netdev_priv(dev); | |
1047 | void __iomem *ioaddr = rp->base; | |
1048 | int i; | |
1049 | ||
1050 | for (i = 0; i < 6; i++) | |
1051 | iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i); | |
1052 | ||
1053 | /* Initialize other registers. */ | |
1054 | iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */ | |
1055 | /* Configure initial FIFO thresholds. */ | |
1056 | iowrite8(0x20, ioaddr + TxConfig); | |
1057 | rp->tx_thresh = 0x20; | |
1058 | rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */ | |
1059 | ||
1060 | iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr); | |
1061 | iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr); | |
1062 | ||
1063 | rhine_set_rx_mode(dev); | |
1064 | ||
bea3348e | 1065 | napi_enable(&rp->napi); |
ab197668 | 1066 | |
1da177e4 LT |
1067 | /* Enable interrupts by setting the interrupt mask. */ |
1068 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | | |
1069 | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | | |
1070 | IntrTxDone | IntrTxError | IntrTxUnderrun | | |
1071 | IntrPCIErr | IntrStatsMax | IntrLinkChange, | |
1072 | ioaddr + IntrEnable); | |
1073 | ||
1074 | iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8), | |
1075 | ioaddr + ChipCmd); | |
1076 | rhine_check_media(dev, 1); | |
1077 | } | |
1078 | ||
1079 | /* Enable MII link status auto-polling (required for IntrLinkChange) */ | |
1080 | static void rhine_enable_linkmon(void __iomem *ioaddr) | |
1081 | { | |
1082 | iowrite8(0, ioaddr + MIICmd); | |
1083 | iowrite8(MII_BMSR, ioaddr + MIIRegAddr); | |
1084 | iowrite8(0x80, ioaddr + MIICmd); | |
1085 | ||
1086 | RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20)); | |
1087 | ||
1088 | iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr); | |
1089 | } | |
1090 | ||
1091 | /* Disable MII link status auto-polling (required for MDIO access) */ | |
1092 | static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks) | |
1093 | { | |
1094 | iowrite8(0, ioaddr + MIICmd); | |
1095 | ||
1096 | if (quirks & rqRhineI) { | |
1097 | iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR | |
1098 | ||
38bb6b28 JL |
1099 | /* Can be called from ISR. Evil. */ |
1100 | mdelay(1); | |
1da177e4 LT |
1101 | |
1102 | /* 0x80 must be set immediately before turning it off */ | |
1103 | iowrite8(0x80, ioaddr + MIICmd); | |
1104 | ||
1105 | RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20); | |
1106 | ||
1107 | /* Heh. Now clear 0x80 again. */ | |
1108 | iowrite8(0, ioaddr + MIICmd); | |
1109 | } | |
1110 | else | |
1111 | RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80); | |
1112 | } | |
1113 | ||
1114 | /* Read and write over the MII Management Data I/O (MDIO) interface. */ | |
1115 | ||
1116 | static int mdio_read(struct net_device *dev, int phy_id, int regnum) | |
1117 | { | |
1118 | struct rhine_private *rp = netdev_priv(dev); | |
1119 | void __iomem *ioaddr = rp->base; | |
1120 | int result; | |
1121 | ||
1122 | rhine_disable_linkmon(ioaddr, rp->quirks); | |
1123 | ||
1124 | /* rhine_disable_linkmon already cleared MIICmd */ | |
1125 | iowrite8(phy_id, ioaddr + MIIPhyAddr); | |
1126 | iowrite8(regnum, ioaddr + MIIRegAddr); | |
1127 | iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */ | |
1128 | RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40)); | |
1129 | result = ioread16(ioaddr + MIIData); | |
1130 | ||
1131 | rhine_enable_linkmon(ioaddr); | |
1132 | return result; | |
1133 | } | |
1134 | ||
1135 | static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value) | |
1136 | { | |
1137 | struct rhine_private *rp = netdev_priv(dev); | |
1138 | void __iomem *ioaddr = rp->base; | |
1139 | ||
1140 | rhine_disable_linkmon(ioaddr, rp->quirks); | |
1141 | ||
1142 | /* rhine_disable_linkmon already cleared MIICmd */ | |
1143 | iowrite8(phy_id, ioaddr + MIIPhyAddr); | |
1144 | iowrite8(regnum, ioaddr + MIIRegAddr); | |
1145 | iowrite16(value, ioaddr + MIIData); | |
1146 | iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */ | |
1147 | RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20)); | |
1148 | ||
1149 | rhine_enable_linkmon(ioaddr); | |
1150 | } | |
1151 | ||
1152 | static int rhine_open(struct net_device *dev) | |
1153 | { | |
1154 | struct rhine_private *rp = netdev_priv(dev); | |
1155 | void __iomem *ioaddr = rp->base; | |
1156 | int rc; | |
1157 | ||
76781382 | 1158 | rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name, |
1da177e4 LT |
1159 | dev); |
1160 | if (rc) | |
1161 | return rc; | |
1162 | ||
1163 | if (debug > 1) | |
1164 | printk(KERN_DEBUG "%s: rhine_open() irq %d.\n", | |
1165 | dev->name, rp->pdev->irq); | |
1166 | ||
1167 | rc = alloc_ring(dev); | |
1168 | if (rc) { | |
1169 | free_irq(rp->pdev->irq, dev); | |
1170 | return rc; | |
1171 | } | |
1172 | alloc_rbufs(dev); | |
1173 | alloc_tbufs(dev); | |
1174 | rhine_chip_reset(dev); | |
1175 | init_registers(dev); | |
1176 | if (debug > 2) | |
1177 | printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x " | |
1178 | "MII status: %4.4x.\n", | |
1179 | dev->name, ioread16(ioaddr + ChipCmd), | |
1180 | mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); | |
1181 | ||
1182 | netif_start_queue(dev); | |
1183 | ||
1184 | return 0; | |
1185 | } | |
1186 | ||
c0d7a021 | 1187 | static void rhine_reset_task(struct work_struct *work) |
1da177e4 | 1188 | { |
c0d7a021 JP |
1189 | struct rhine_private *rp = container_of(work, struct rhine_private, |
1190 | reset_task); | |
1191 | struct net_device *dev = rp->dev; | |
1da177e4 LT |
1192 | |
1193 | /* protect against concurrent rx interrupts */ | |
1194 | disable_irq(rp->pdev->irq); | |
1195 | ||
bea3348e | 1196 | napi_disable(&rp->napi); |
bea3348e | 1197 | |
c0d7a021 | 1198 | spin_lock_bh(&rp->lock); |
1da177e4 LT |
1199 | |
1200 | /* clear all descriptors */ | |
1201 | free_tbufs(dev); | |
1202 | free_rbufs(dev); | |
1203 | alloc_tbufs(dev); | |
1204 | alloc_rbufs(dev); | |
1205 | ||
1206 | /* Reinitialize the hardware. */ | |
1207 | rhine_chip_reset(dev); | |
1208 | init_registers(dev); | |
1209 | ||
c0d7a021 | 1210 | spin_unlock_bh(&rp->lock); |
1da177e4 LT |
1211 | enable_irq(rp->pdev->irq); |
1212 | ||
1213 | dev->trans_start = jiffies; | |
553e2335 | 1214 | dev->stats.tx_errors++; |
1da177e4 LT |
1215 | netif_wake_queue(dev); |
1216 | } | |
1217 | ||
c0d7a021 JP |
1218 | static void rhine_tx_timeout(struct net_device *dev) |
1219 | { | |
1220 | struct rhine_private *rp = netdev_priv(dev); | |
1221 | void __iomem *ioaddr = rp->base; | |
1222 | ||
1223 | printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status " | |
1224 | "%4.4x, resetting...\n", | |
1225 | dev->name, ioread16(ioaddr + IntrStatus), | |
1226 | mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); | |
1227 | ||
1228 | schedule_work(&rp->reset_task); | |
1229 | } | |
1230 | ||
61357325 SH |
1231 | static netdev_tx_t rhine_start_tx(struct sk_buff *skb, |
1232 | struct net_device *dev) | |
1da177e4 LT |
1233 | { |
1234 | struct rhine_private *rp = netdev_priv(dev); | |
1235 | void __iomem *ioaddr = rp->base; | |
1236 | unsigned entry; | |
22580f89 | 1237 | unsigned long flags; |
1da177e4 LT |
1238 | |
1239 | /* Caution: the write order is important here, set the field | |
1240 | with the "ownership" bits last. */ | |
1241 | ||
1242 | /* Calculate the next Tx descriptor entry. */ | |
1243 | entry = rp->cur_tx % TX_RING_SIZE; | |
1244 | ||
5b057c6b | 1245 | if (skb_padto(skb, ETH_ZLEN)) |
6ed10654 | 1246 | return NETDEV_TX_OK; |
1da177e4 LT |
1247 | |
1248 | rp->tx_skbuff[entry] = skb; | |
1249 | ||
1250 | if ((rp->quirks & rqRhineI) && | |
84fa7933 | 1251 | (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) { |
1da177e4 LT |
1252 | /* Must use alignment buffer. */ |
1253 | if (skb->len > PKT_BUF_SZ) { | |
1254 | /* packet too long, drop it */ | |
1255 | dev_kfree_skb(skb); | |
1256 | rp->tx_skbuff[entry] = NULL; | |
553e2335 | 1257 | dev->stats.tx_dropped++; |
6ed10654 | 1258 | return NETDEV_TX_OK; |
1da177e4 | 1259 | } |
3e0d167a CB |
1260 | |
1261 | /* Padding is not copied and so must be redone. */ | |
1da177e4 | 1262 | skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); |
3e0d167a CB |
1263 | if (skb->len < ETH_ZLEN) |
1264 | memset(rp->tx_buf[entry] + skb->len, 0, | |
1265 | ETH_ZLEN - skb->len); | |
1da177e4 LT |
1266 | rp->tx_skbuff_dma[entry] = 0; |
1267 | rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + | |
1268 | (rp->tx_buf[entry] - | |
1269 | rp->tx_bufs)); | |
1270 | } else { | |
1271 | rp->tx_skbuff_dma[entry] = | |
1272 | pci_map_single(rp->pdev, skb->data, skb->len, | |
1273 | PCI_DMA_TODEVICE); | |
1274 | rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); | |
1275 | } | |
1276 | ||
1277 | rp->tx_ring[entry].desc_length = | |
1278 | cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); | |
1279 | ||
1280 | /* lock eth irq */ | |
22580f89 | 1281 | spin_lock_irqsave(&rp->lock, flags); |
1da177e4 LT |
1282 | wmb(); |
1283 | rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); | |
1284 | wmb(); | |
1285 | ||
1286 | rp->cur_tx++; | |
1287 | ||
1288 | /* Non-x86 Todo: explicitly flush cache lines here. */ | |
1289 | ||
1290 | /* Wake the potentially-idle transmit channel */ | |
1291 | iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, | |
1292 | ioaddr + ChipCmd1); | |
1293 | IOSYNC; | |
1294 | ||
1295 | if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN) | |
1296 | netif_stop_queue(dev); | |
1297 | ||
1298 | dev->trans_start = jiffies; | |
1299 | ||
22580f89 | 1300 | spin_unlock_irqrestore(&rp->lock, flags); |
1da177e4 LT |
1301 | |
1302 | if (debug > 4) { | |
1303 | printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", | |
1304 | dev->name, rp->cur_tx-1, entry); | |
1305 | } | |
6ed10654 | 1306 | return NETDEV_TX_OK; |
1da177e4 LT |
1307 | } |
1308 | ||
1309 | /* The interrupt handler does all of the Rx thread work and cleans up | |
1310 | after the Tx thread. */ | |
7d12e780 | 1311 | static irqreturn_t rhine_interrupt(int irq, void *dev_instance) |
1da177e4 LT |
1312 | { |
1313 | struct net_device *dev = dev_instance; | |
1314 | struct rhine_private *rp = netdev_priv(dev); | |
1315 | void __iomem *ioaddr = rp->base; | |
1316 | u32 intr_status; | |
1317 | int boguscnt = max_interrupt_work; | |
1318 | int handled = 0; | |
1319 | ||
1320 | while ((intr_status = get_intr_status(dev))) { | |
1321 | handled = 1; | |
1322 | ||
1323 | /* Acknowledge all of the current interrupt sources ASAP. */ | |
1324 | if (intr_status & IntrTxDescRace) | |
1325 | iowrite8(0x08, ioaddr + IntrStatus2); | |
1326 | iowrite16(intr_status & 0xffff, ioaddr + IntrStatus); | |
1327 | IOSYNC; | |
1328 | ||
1329 | if (debug > 4) | |
1330 | printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n", | |
1331 | dev->name, intr_status); | |
1332 | ||
1333 | if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | | |
633949a1 | 1334 | IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) { |
633949a1 RL |
1335 | iowrite16(IntrTxAborted | |
1336 | IntrTxDone | IntrTxError | IntrTxUnderrun | | |
1337 | IntrPCIErr | IntrStatsMax | IntrLinkChange, | |
1338 | ioaddr + IntrEnable); | |
1339 | ||
288379f0 | 1340 | napi_schedule(&rp->napi); |
633949a1 | 1341 | } |
1da177e4 LT |
1342 | |
1343 | if (intr_status & (IntrTxErrSummary | IntrTxDone)) { | |
1344 | if (intr_status & IntrTxErrSummary) { | |
1345 | /* Avoid scavenging before Tx engine turned off */ | |
1346 | RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn)); | |
1347 | if (debug > 2 && | |
1348 | ioread8(ioaddr+ChipCmd) & CmdTxOn) | |
1349 | printk(KERN_WARNING "%s: " | |
2450022a | 1350 | "rhine_interrupt() Tx engine " |
1da177e4 LT |
1351 | "still on.\n", dev->name); |
1352 | } | |
1353 | rhine_tx(dev); | |
1354 | } | |
1355 | ||
1356 | /* Abnormal error summary/uncommon events handlers. */ | |
1357 | if (intr_status & (IntrPCIErr | IntrLinkChange | | |
1358 | IntrStatsMax | IntrTxError | IntrTxAborted | | |
1359 | IntrTxUnderrun | IntrTxDescRace)) | |
1360 | rhine_error(dev, intr_status); | |
1361 | ||
1362 | if (--boguscnt < 0) { | |
1363 | printk(KERN_WARNING "%s: Too much work at interrupt, " | |
1364 | "status=%#8.8x.\n", | |
1365 | dev->name, intr_status); | |
1366 | break; | |
1367 | } | |
1368 | } | |
1369 | ||
1370 | if (debug > 3) | |
1371 | printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n", | |
1372 | dev->name, ioread16(ioaddr + IntrStatus)); | |
1373 | return IRQ_RETVAL(handled); | |
1374 | } | |
1375 | ||
1376 | /* This routine is logically part of the interrupt handler, but isolated | |
1377 | for clarity. */ | |
1378 | static void rhine_tx(struct net_device *dev) | |
1379 | { | |
1380 | struct rhine_private *rp = netdev_priv(dev); | |
1381 | int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; | |
1382 | ||
1383 | spin_lock(&rp->lock); | |
1384 | ||
1385 | /* find and cleanup dirty tx descriptors */ | |
1386 | while (rp->dirty_tx != rp->cur_tx) { | |
1387 | txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); | |
1388 | if (debug > 6) | |
ed4030d1 | 1389 | printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n", |
1da177e4 LT |
1390 | entry, txstatus); |
1391 | if (txstatus & DescOwn) | |
1392 | break; | |
1393 | if (txstatus & 0x8000) { | |
1394 | if (debug > 1) | |
1395 | printk(KERN_DEBUG "%s: Transmit error, " | |
1396 | "Tx status %8.8x.\n", | |
1397 | dev->name, txstatus); | |
553e2335 ED |
1398 | dev->stats.tx_errors++; |
1399 | if (txstatus & 0x0400) | |
1400 | dev->stats.tx_carrier_errors++; | |
1401 | if (txstatus & 0x0200) | |
1402 | dev->stats.tx_window_errors++; | |
1403 | if (txstatus & 0x0100) | |
1404 | dev->stats.tx_aborted_errors++; | |
1405 | if (txstatus & 0x0080) | |
1406 | dev->stats.tx_heartbeat_errors++; | |
1da177e4 LT |
1407 | if (((rp->quirks & rqRhineI) && txstatus & 0x0002) || |
1408 | (txstatus & 0x0800) || (txstatus & 0x1000)) { | |
553e2335 | 1409 | dev->stats.tx_fifo_errors++; |
1da177e4 LT |
1410 | rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); |
1411 | break; /* Keep the skb - we try again */ | |
1412 | } | |
1413 | /* Transmitter restarted in 'abnormal' handler. */ | |
1414 | } else { | |
1415 | if (rp->quirks & rqRhineI) | |
553e2335 | 1416 | dev->stats.collisions += (txstatus >> 3) & 0x0F; |
1da177e4 | 1417 | else |
553e2335 | 1418 | dev->stats.collisions += txstatus & 0x0F; |
1da177e4 LT |
1419 | if (debug > 6) |
1420 | printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n", | |
1421 | (txstatus >> 3) & 0xF, | |
1422 | txstatus & 0xF); | |
553e2335 ED |
1423 | dev->stats.tx_bytes += rp->tx_skbuff[entry]->len; |
1424 | dev->stats.tx_packets++; | |
1da177e4 LT |
1425 | } |
1426 | /* Free the original skb. */ | |
1427 | if (rp->tx_skbuff_dma[entry]) { | |
1428 | pci_unmap_single(rp->pdev, | |
1429 | rp->tx_skbuff_dma[entry], | |
1430 | rp->tx_skbuff[entry]->len, | |
1431 | PCI_DMA_TODEVICE); | |
1432 | } | |
1433 | dev_kfree_skb_irq(rp->tx_skbuff[entry]); | |
1434 | rp->tx_skbuff[entry] = NULL; | |
1435 | entry = (++rp->dirty_tx) % TX_RING_SIZE; | |
1436 | } | |
1437 | if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4) | |
1438 | netif_wake_queue(dev); | |
1439 | ||
1440 | spin_unlock(&rp->lock); | |
1441 | } | |
1442 | ||
633949a1 RL |
1443 | /* Process up to limit frames from receive ring */ |
1444 | static int rhine_rx(struct net_device *dev, int limit) | |
1da177e4 LT |
1445 | { |
1446 | struct rhine_private *rp = netdev_priv(dev); | |
633949a1 | 1447 | int count; |
1da177e4 | 1448 | int entry = rp->cur_rx % RX_RING_SIZE; |
1da177e4 LT |
1449 | |
1450 | if (debug > 4) { | |
1451 | printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n", | |
1452 | dev->name, entry, | |
1453 | le32_to_cpu(rp->rx_head_desc->rx_status)); | |
1454 | } | |
1455 | ||
1456 | /* If EOP is set on the next entry, it's a new packet. Send it up. */ | |
633949a1 | 1457 | for (count = 0; count < limit; ++count) { |
1da177e4 LT |
1458 | struct rx_desc *desc = rp->rx_head_desc; |
1459 | u32 desc_status = le32_to_cpu(desc->rx_status); | |
1460 | int data_size = desc_status >> 16; | |
1461 | ||
633949a1 RL |
1462 | if (desc_status & DescOwn) |
1463 | break; | |
1464 | ||
1da177e4 | 1465 | if (debug > 4) |
ed4030d1 | 1466 | printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n", |
1da177e4 | 1467 | desc_status); |
633949a1 | 1468 | |
1da177e4 LT |
1469 | if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { |
1470 | if ((desc_status & RxWholePkt) != RxWholePkt) { | |
1471 | printk(KERN_WARNING "%s: Oversized Ethernet " | |
1472 | "frame spanned multiple buffers, entry " | |
1473 | "%#x length %d status %8.8x!\n", | |
1474 | dev->name, entry, data_size, | |
1475 | desc_status); | |
1476 | printk(KERN_WARNING "%s: Oversized Ethernet " | |
1477 | "frame %p vs %p.\n", dev->name, | |
1478 | rp->rx_head_desc, &rp->rx_ring[entry]); | |
553e2335 | 1479 | dev->stats.rx_length_errors++; |
1da177e4 LT |
1480 | } else if (desc_status & RxErr) { |
1481 | /* There was an error. */ | |
1482 | if (debug > 2) | |
ed4030d1 | 1483 | printk(KERN_DEBUG "rhine_rx() Rx " |
1da177e4 LT |
1484 | "error was %8.8x.\n", |
1485 | desc_status); | |
553e2335 ED |
1486 | dev->stats.rx_errors++; |
1487 | if (desc_status & 0x0030) | |
1488 | dev->stats.rx_length_errors++; | |
1489 | if (desc_status & 0x0048) | |
1490 | dev->stats.rx_fifo_errors++; | |
1491 | if (desc_status & 0x0004) | |
1492 | dev->stats.rx_frame_errors++; | |
1da177e4 LT |
1493 | if (desc_status & 0x0002) { |
1494 | /* This can also be updated outside the interrupt handler. */ | |
1495 | spin_lock(&rp->lock); | |
553e2335 | 1496 | dev->stats.rx_crc_errors++; |
1da177e4 LT |
1497 | spin_unlock(&rp->lock); |
1498 | } | |
1499 | } | |
1500 | } else { | |
89d71a66 | 1501 | struct sk_buff *skb = NULL; |
1da177e4 LT |
1502 | /* Length should omit the CRC */ |
1503 | int pkt_len = data_size - 4; | |
1504 | ||
1505 | /* Check if the packet is long enough to accept without | |
1506 | copying to a minimally-sized skbuff. */ | |
89d71a66 ED |
1507 | if (pkt_len < rx_copybreak) |
1508 | skb = netdev_alloc_skb_ip_align(dev, pkt_len); | |
1509 | if (skb) { | |
1da177e4 LT |
1510 | pci_dma_sync_single_for_cpu(rp->pdev, |
1511 | rp->rx_skbuff_dma[entry], | |
1512 | rp->rx_buf_sz, | |
1513 | PCI_DMA_FROMDEVICE); | |
1514 | ||
8c7b7faa | 1515 | skb_copy_to_linear_data(skb, |
689be439 | 1516 | rp->rx_skbuff[entry]->data, |
8c7b7faa | 1517 | pkt_len); |
1da177e4 LT |
1518 | skb_put(skb, pkt_len); |
1519 | pci_dma_sync_single_for_device(rp->pdev, | |
1520 | rp->rx_skbuff_dma[entry], | |
1521 | rp->rx_buf_sz, | |
1522 | PCI_DMA_FROMDEVICE); | |
1523 | } else { | |
1524 | skb = rp->rx_skbuff[entry]; | |
1525 | if (skb == NULL) { | |
1526 | printk(KERN_ERR "%s: Inconsistent Rx " | |
1527 | "descriptor chain.\n", | |
1528 | dev->name); | |
1529 | break; | |
1530 | } | |
1531 | rp->rx_skbuff[entry] = NULL; | |
1532 | skb_put(skb, pkt_len); | |
1533 | pci_unmap_single(rp->pdev, | |
1534 | rp->rx_skbuff_dma[entry], | |
1535 | rp->rx_buf_sz, | |
1536 | PCI_DMA_FROMDEVICE); | |
1537 | } | |
1538 | skb->protocol = eth_type_trans(skb, dev); | |
633949a1 | 1539 | netif_receive_skb(skb); |
553e2335 ED |
1540 | dev->stats.rx_bytes += pkt_len; |
1541 | dev->stats.rx_packets++; | |
1da177e4 LT |
1542 | } |
1543 | entry = (++rp->cur_rx) % RX_RING_SIZE; | |
1544 | rp->rx_head_desc = &rp->rx_ring[entry]; | |
1545 | } | |
1546 | ||
1547 | /* Refill the Rx ring buffers. */ | |
1548 | for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) { | |
1549 | struct sk_buff *skb; | |
1550 | entry = rp->dirty_rx % RX_RING_SIZE; | |
1551 | if (rp->rx_skbuff[entry] == NULL) { | |
b26b555a | 1552 | skb = netdev_alloc_skb(dev, rp->rx_buf_sz); |
1da177e4 LT |
1553 | rp->rx_skbuff[entry] = skb; |
1554 | if (skb == NULL) | |
1555 | break; /* Better luck next round. */ | |
1556 | skb->dev = dev; /* Mark as being used by this device. */ | |
1557 | rp->rx_skbuff_dma[entry] = | |
689be439 | 1558 | pci_map_single(rp->pdev, skb->data, |
1da177e4 LT |
1559 | rp->rx_buf_sz, |
1560 | PCI_DMA_FROMDEVICE); | |
1561 | rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]); | |
1562 | } | |
1563 | rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); | |
1564 | } | |
633949a1 RL |
1565 | |
1566 | return count; | |
1da177e4 LT |
1567 | } |
1568 | ||
1569 | /* | |
1570 | * Clears the "tally counters" for CRC errors and missed frames(?). | |
1571 | * It has been reported that some chips need a write of 0 to clear | |
1572 | * these, for others the counters are set to 1 when written to and | |
1573 | * instead cleared when read. So we clear them both ways ... | |
1574 | */ | |
1575 | static inline void clear_tally_counters(void __iomem *ioaddr) | |
1576 | { | |
1577 | iowrite32(0, ioaddr + RxMissed); | |
1578 | ioread16(ioaddr + RxCRCErrs); | |
1579 | ioread16(ioaddr + RxMissed); | |
1580 | } | |
1581 | ||
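/*
 * Restart the transmitter after an error. If no further Tx error is
 * pending, point the chip at the first still-outstanding descriptor
 * (dirty_tx) and kick Tx again; otherwise do nothing, since the interrupt
 * handler will come back here once the new error has been handled.
 */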
1582 | static void rhine_restart_tx(struct net_device *dev) { | |
1583 | struct rhine_private *rp = netdev_priv(dev); | |
1584 | void __iomem *ioaddr = rp->base; | |
1585 | int entry = rp->dirty_tx % TX_RING_SIZE; | |
1586 | u32 intr_status; | |
1587 | ||
1588 | /* | |
1589 | * If new errors occurred, we need to sort them out before doing Tx. | |
1590 | * In that case the ISR will be back here RSN anyway. | |
1591 | */ | |
1592 | intr_status = get_intr_status(dev); | |
1593 | ||
1594 | if ((intr_status & IntrTxErrSummary) == 0) { | |
1595 | ||
1596 | /* We know better than the chip where it should continue. */ | |
1597 | iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc), | |
1598 | ioaddr + TxRingPtr); | |
1599 | ||
1600 | iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn, | |
1601 | ioaddr + ChipCmd); | |
1602 | iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, | |
1603 | ioaddr + ChipCmd1); | |
1604 | IOSYNC; | |
1605 | } | |
1606 | else { | |
1607 | /* This should never happen */ | |
1608 | if (debug > 1) | |
1609 | printk(KERN_WARNING "%s: rhine_restart_tx() " | |
1610 | "Another error occured %8.8x.\n", | |
1611 | dev->name, intr_status); | |
1612 | } | |
1613 | ||
1614 | } | |
1615 | ||
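/*
 * Handle the "abnormal" interrupt sources: link changes, tally counter
 * overflow and the assorted Tx error conditions. Aborts, underruns,
 * descriptor write-back races and unspecified Tx errors all end with a
 * call to rhine_restart_tx(); underruns and unspecified errors also raise
 * the Tx FIFO threshold.
 */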
1616 | static void rhine_error(struct net_device *dev, int intr_status) | |
1617 | { | |
1618 | struct rhine_private *rp = netdev_priv(dev); | |
1619 | void __iomem *ioaddr = rp->base; | |
1620 | ||
1621 | spin_lock(&rp->lock); | |
1622 | ||
1623 | if (intr_status & IntrLinkChange) | |
38bb6b28 | 1624 | rhine_check_media(dev, 0); |
1da177e4 | 1625 | if (intr_status & IntrStatsMax) { |
553e2335 ED |
1626 | dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); |
1627 | dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); | |
1da177e4 LT |
1628 | clear_tally_counters(ioaddr); |
1629 | } | |
1630 | if (intr_status & IntrTxAborted) { | |
1631 | if (debug > 1) | |
1632 | printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n", | |
1633 | dev->name, intr_status); | |
1634 | } | |
1635 | if (intr_status & IntrTxUnderrun) { | |
1636 | if (rp->tx_thresh < 0xE0) | |
1637 | iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig); | |
1638 | if (debug > 1) | |
1639 | printk(KERN_INFO "%s: Transmitter underrun, Tx " | |
1640 | "threshold now %2.2x.\n", | |
1641 | dev->name, rp->tx_thresh); | |
1642 | } | |
1643 | if (intr_status & IntrTxDescRace) { | |
1644 | if (debug > 2) | |
1645 | printk(KERN_INFO "%s: Tx descriptor write-back race.\n", | |
1646 | dev->name); | |
1647 | } | |
1648 | if ((intr_status & IntrTxError) && | |
1649 | (intr_status & (IntrTxAborted | | |
1650 | IntrTxUnderrun | IntrTxDescRace)) == 0) { | |
1651 | if (rp->tx_thresh < 0xE0) { | |
1652 | iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig); | |
1653 | } | |
1654 | if (debug > 1) | |
1655 | printk(KERN_INFO "%s: Unspecified error. Tx " | |
1656 | "threshold now %2.2x.\n", | |
1657 | dev->name, rp->tx_thresh); | |
1658 | } | |
1659 | if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace | | |
1660 | IntrTxError)) | |
1661 | rhine_restart_tx(dev); | |
1662 | ||
1663 | if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun | | |
1664 | IntrTxError | IntrTxAborted | IntrNormalSummary | | |
1665 | IntrTxDescRace)) { | |
1666 | if (debug > 1) | |
1667 | printk(KERN_ERR "%s: Something Wicked happened! " | |
1668 | "%8.8x.\n", dev->name, intr_status); | |
1669 | } | |
1670 | ||
1671 | spin_unlock(&rp->lock); | |
1672 | } | |
1673 | ||
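/*
 * Fold the hardware tally counters (CRC errors, missed frames) into
 * dev->stats under the lock and clear them so nothing is counted twice.
 */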
1674 | static struct net_device_stats *rhine_get_stats(struct net_device *dev) | |
1675 | { | |
1676 | struct rhine_private *rp = netdev_priv(dev); | |
1677 | void __iomem *ioaddr = rp->base; | |
1678 | unsigned long flags; | |
1679 | ||
1680 | spin_lock_irqsave(&rp->lock, flags); | |
553e2335 ED |
1681 | dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); |
1682 | dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); | |
1da177e4 LT |
1683 | clear_tally_counters(ioaddr); |
1684 | spin_unlock_irqrestore(&rp->lock, flags); | |
1685 | ||
553e2335 | 1686 | return &dev->stats; |
1da177e4 LT |
1687 | } |
1688 | ||
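/*
 * Program the receive filter. Promiscuous mode and "too many multicast
 * addresses" both open the multicast filter completely; otherwise each
 * multicast address is hashed with ether_crc(), the top six bits select
 * one of the 64 hash-table bits (bit_nr >> 5 picks the filter word,
 * bit_nr & 31 the bit within it), and the result is written to the
 * MulticastFilter0/1 registers.
 */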
1689 | static void rhine_set_rx_mode(struct net_device *dev) | |
1690 | { | |
1691 | struct rhine_private *rp = netdev_priv(dev); | |
1692 | void __iomem *ioaddr = rp->base; | |
1693 | u32 mc_filter[2]; /* Multicast hash filter */ | |
1694 | u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */ | |
1695 | ||
1696 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ | |
1da177e4 LT |
1697 | rx_mode = 0x1C; |
1698 | iowrite32(0xffffffff, ioaddr + MulticastFilter0); | |
1699 | iowrite32(0xffffffff, ioaddr + MulticastFilter1); | |
8e95a202 JP |
1700 | } else if ((dev->mc_count > multicast_filter_limit) || |
1701 | (dev->flags & IFF_ALLMULTI)) { | |
1da177e4 LT |
1702 | /* Too many to match, or accept all multicasts. */ |
1703 | iowrite32(0xffffffff, ioaddr + MulticastFilter0); | |
1704 | iowrite32(0xffffffff, ioaddr + MulticastFilter1); | |
1705 | rx_mode = 0x0C; | |
1706 | } else { | |
1707 | struct dev_mc_list *mclist; | |
1708 | int i; | |
1709 | memset(mc_filter, 0, sizeof(mc_filter)); | |
1710 | for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; | |
1711 | i++, mclist = mclist->next) { | |
1712 | int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; | |
1713 | ||
1714 | mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); | |
1715 | } | |
1716 | iowrite32(mc_filter[0], ioaddr + MulticastFilter0); | |
1717 | iowrite32(mc_filter[1], ioaddr + MulticastFilter1); | |
1718 | rx_mode = 0x0C; | |
1719 | } | |
1720 | iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig); | |
1721 | } | |
1722 | ||
1723 | static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | |
1724 | { | |
1725 | struct rhine_private *rp = netdev_priv(dev); | |
1726 | ||
1727 | strcpy(info->driver, DRV_NAME); | |
1728 | strcpy(info->version, DRV_VERSION); | |
1729 | strcpy(info->bus_info, pci_name(rp->pdev)); | |
1730 | } | |
1731 | ||
1732 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
1733 | { | |
1734 | struct rhine_private *rp = netdev_priv(dev); | |
1735 | int rc; | |
1736 | ||
1737 | spin_lock_irq(&rp->lock); | |
1738 | rc = mii_ethtool_gset(&rp->mii_if, cmd); | |
1739 | spin_unlock_irq(&rp->lock); | |
1740 | ||
1741 | return rc; | |
1742 | } | |
1743 | ||
1744 | static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
1745 | { | |
1746 | struct rhine_private *rp = netdev_priv(dev); | |
1747 | int rc; | |
1748 | ||
1749 | spin_lock_irq(&rp->lock); | |
1750 | rc = mii_ethtool_sset(&rp->mii_if, cmd); | |
1751 | spin_unlock_irq(&rp->lock); | |
00b428c2 | 1752 | rhine_set_carrier(&rp->mii_if); |
1da177e4 LT |
1753 | |
1754 | return rc; | |
1755 | } | |
1756 | ||
1757 | static int netdev_nway_reset(struct net_device *dev) | |
1758 | { | |
1759 | struct rhine_private *rp = netdev_priv(dev); | |
1760 | ||
1761 | return mii_nway_restart(&rp->mii_if); | |
1762 | } | |
1763 | ||
1764 | static u32 netdev_get_link(struct net_device *dev) | |
1765 | { | |
1766 | struct rhine_private *rp = netdev_priv(dev); | |
1767 | ||
1768 | return mii_link_ok(&rp->mii_if); | |
1769 | } | |
1770 | ||
1771 | static u32 netdev_get_msglevel(struct net_device *dev) | |
1772 | { | |
1773 | return debug; | |
1774 | } | |
1775 | ||
1776 | static void netdev_set_msglevel(struct net_device *dev, u32 value) | |
1777 | { | |
1778 | debug = value; | |
1779 | } | |
1780 | ||
1781 | static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |
1782 | { | |
1783 | struct rhine_private *rp = netdev_priv(dev); | |
1784 | ||
1785 | if (!(rp->quirks & rqWOL)) | |
1786 | return; | |
1787 | ||
1788 | spin_lock_irq(&rp->lock); | |
1789 | wol->supported = WAKE_PHY | WAKE_MAGIC | | |
1790 | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ | |
1791 | wol->wolopts = rp->wolopts; | |
1792 | spin_unlock_irq(&rp->lock); | |
1793 | } | |
1794 | ||
1795 | static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |
1796 | { | |
1797 | struct rhine_private *rp = netdev_priv(dev); | |
1798 | u32 support = WAKE_PHY | WAKE_MAGIC | | |
1799 | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ | |
1800 | ||
1801 | if (!(rp->quirks & rqWOL)) | |
1802 | return -EINVAL; | |
1803 | ||
1804 | if (wol->wolopts & ~support) | |
1805 | return -EINVAL; | |
1806 | ||
1807 | spin_lock_irq(&rp->lock); | |
1808 | rp->wolopts = wol->wolopts; | |
1809 | spin_unlock_irq(&rp->lock); | |
1810 | ||
1811 | return 0; | |
1812 | } | |
1813 | ||
7282d491 | 1814 | static const struct ethtool_ops netdev_ethtool_ops = { |
1da177e4 LT |
1815 | .get_drvinfo = netdev_get_drvinfo, |
1816 | .get_settings = netdev_get_settings, | |
1817 | .set_settings = netdev_set_settings, | |
1818 | .nway_reset = netdev_nway_reset, | |
1819 | .get_link = netdev_get_link, | |
1820 | .get_msglevel = netdev_get_msglevel, | |
1821 | .set_msglevel = netdev_set_msglevel, | |
1822 | .get_wol = rhine_get_wol, | |
1823 | .set_wol = rhine_set_wol, | |
1da177e4 LT |
1824 | }; |
1825 | ||
1826 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
1827 | { | |
1828 | struct rhine_private *rp = netdev_priv(dev); | |
1829 | int rc; | |
1830 | ||
1831 | if (!netif_running(dev)) | |
1832 | return -EINVAL; | |
1833 | ||
1834 | spin_lock_irq(&rp->lock); | |
1835 | rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL); | |
1836 | spin_unlock_irq(&rp->lock); | |
00b428c2 | 1837 | rhine_set_carrier(&rp->mii_if); |
1da177e4 LT |
1838 | |
1839 | return rc; | |
1840 | } | |
1841 | ||
1842 | static int rhine_close(struct net_device *dev) | |
1843 | { | |
1844 | struct rhine_private *rp = netdev_priv(dev); | |
1845 | void __iomem *ioaddr = rp->base; | |
1846 | ||
bea3348e | 1847 | napi_disable(&rp->napi); |
c0d7a021 JP |
1848 | cancel_work_sync(&rp->reset_task); |
1849 | netif_stop_queue(dev); | |
1850 | ||
1851 | spin_lock_irq(&rp->lock); | |
1da177e4 LT |
1852 | |
1853 | if (debug > 1) | |
1854 | printk(KERN_DEBUG "%s: Shutting down ethercard, " | |
1855 | "status was %4.4x.\n", | |
1856 | dev->name, ioread16(ioaddr + ChipCmd)); | |
1857 | ||
1858 | /* Switch to loopback mode to avoid hardware races. */ | |
1859 | iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); | |
1860 | ||
1861 | /* Disable interrupts by clearing the interrupt mask. */ | |
1862 | iowrite16(0x0000, ioaddr + IntrEnable); | |
1863 | ||
1864 | /* Stop the chip's Tx and Rx processes. */ | |
1865 | iowrite16(CmdStop, ioaddr + ChipCmd); | |
1866 | ||
1867 | spin_unlock_irq(&rp->lock); | |
1868 | ||
1869 | free_irq(rp->pdev->irq, dev); | |
1870 | free_rbufs(dev); | |
1871 | free_tbufs(dev); | |
1872 | free_ring(dev); | |
1873 | ||
1874 | return 0; | |
1875 | } | |
1876 | ||
1877 | ||
1878 | static void __devexit rhine_remove_one(struct pci_dev *pdev) | |
1879 | { | |
1880 | struct net_device *dev = pci_get_drvdata(pdev); | |
1881 | struct rhine_private *rp = netdev_priv(dev); | |
1882 | ||
1883 | unregister_netdev(dev); | |
1884 | ||
1885 | pci_iounmap(pdev, rp->base); | |
1886 | pci_release_regions(pdev); | |
1887 | ||
1888 | free_netdev(dev); | |
1889 | pci_disable_device(pdev); | |
1890 | pci_set_drvdata(pdev, NULL); | |
1891 | } | |
1892 | ||
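/*
 * Arm Wake-on-LAN according to rp->wolopts and, unless avoid_D3 is set to
 * work around a broken BIOS, put the chip into power state D3. Shared by
 * the PCI shutdown and suspend paths.
 */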
d18c3db5 | 1893 | static void rhine_shutdown (struct pci_dev *pdev) |
1da177e4 | 1894 | { |
1da177e4 LT |
1895 | struct net_device *dev = pci_get_drvdata(pdev); |
1896 | struct rhine_private *rp = netdev_priv(dev); | |
1897 | void __iomem *ioaddr = rp->base; | |
1898 | ||
1899 | if (!(rp->quirks & rqWOL)) | |
1900 | return; /* Nothing to do for non-WOL adapters */ | |
1901 | ||
1902 | rhine_power_init(dev); | |
1903 | ||
1904 | /* Make sure we use pattern 0, 1 and not 4, 5 */ | |
1905 | if (rp->quirks & rq6patterns) | |
f11cf25e | 1906 | iowrite8(0x04, ioaddr + WOLcgClr); |
1da177e4 LT |
1907 | |
1908 | if (rp->wolopts & WAKE_MAGIC) { | |
1909 | iowrite8(WOLmagic, ioaddr + WOLcrSet); | |
1910 | /* | |
1911 | * Turn EEPROM-controlled wake-up back on -- some hardware may | |
1912 | * not cooperate otherwise. | |
1913 | */ | |
1914 | iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA); | |
1915 | } | |
1916 | ||
1917 | if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST)) | |
1918 | iowrite8(WOLbmcast, ioaddr + WOLcgSet); | |
1919 | ||
1920 | if (rp->wolopts & WAKE_PHY) | |
1921 | iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet); | |
1922 | ||
1923 | if (rp->wolopts & WAKE_UCAST) | |
1924 | iowrite8(WOLucast, ioaddr + WOLcrSet); | |
1925 | ||
1926 | if (rp->wolopts) { | |
1927 | /* Enable legacy WOL (for old motherboards) */ | |
1928 | iowrite8(0x01, ioaddr + PwcfgSet); | |
1929 | iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW); | |
1930 | } | |
1931 | ||
1932 | /* Hit power state D3 (sleep) */ | |
b933b4d9 RL |
1933 | if (!avoid_D3) |
1934 | iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); | |
1da177e4 LT |
1935 | |
1936 | /* TODO: Check use of pci_enable_wake() */ | |
1937 | ||
1938 | } | |
1939 | ||
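/*
 * Power management: suspend detaches the interface and reuses
 * rhine_shutdown() to arm WOL and power down; resume re-requests the IRQ,
 * returns the chip to D0, rebuilds the Rx/Tx rings and reinitializes the
 * registers before re-attaching the interface.
 */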
1940 | #ifdef CONFIG_PM | |
1941 | static int rhine_suspend(struct pci_dev *pdev, pm_message_t state) | |
1942 | { | |
1943 | struct net_device *dev = pci_get_drvdata(pdev); | |
1944 | struct rhine_private *rp = netdev_priv(dev); | |
1945 | unsigned long flags; | |
1946 | ||
1947 | if (!netif_running(dev)) | |
1948 | return 0; | |
1949 | ||
bea3348e | 1950 | napi_disable(&rp->napi); |
32b0f53e | 1951 | |
1da177e4 LT |
1952 | netif_device_detach(dev); |
1953 | pci_save_state(pdev); | |
1954 | ||
1955 | spin_lock_irqsave(&rp->lock, flags); | |
d18c3db5 | 1956 | rhine_shutdown(pdev); |
1da177e4 LT |
1957 | spin_unlock_irqrestore(&rp->lock, flags); |
1958 | ||
1959 | free_irq(dev->irq, dev); | |
1960 | return 0; | |
1961 | } | |
1962 | ||
1963 | static int rhine_resume(struct pci_dev *pdev) | |
1964 | { | |
1965 | struct net_device *dev = pci_get_drvdata(pdev); | |
1966 | struct rhine_private *rp = netdev_priv(dev); | |
1967 | unsigned long flags; | |
1968 | int ret; | |
1969 | ||
1970 | if (!netif_running(dev)) | |
1971 | return 0; | |
1972 | ||
1fb9df5d | 1973 | if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev)) |
1da177e4 LT |
1974 | printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name); |
1975 | ||
1976 | ret = pci_set_power_state(pdev, PCI_D0); | |
1977 | if (debug > 1) | |
1978 | printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n", | |
1979 | dev->name, ret ? "failed" : "succeeded", ret); | |
1980 | ||
1981 | pci_restore_state(pdev); | |
1982 | ||
1983 | spin_lock_irqsave(&rp->lock, flags); | |
1984 | #ifdef USE_MMIO | |
1985 | enable_mmio(rp->pioaddr, rp->quirks); | |
1986 | #endif | |
1987 | rhine_power_init(dev); | |
1988 | free_tbufs(dev); | |
1989 | free_rbufs(dev); | |
1990 | alloc_tbufs(dev); | |
1991 | alloc_rbufs(dev); | |
1992 | init_registers(dev); | |
1993 | spin_unlock_irqrestore(&rp->lock, flags); | |
1994 | ||
1995 | netif_device_attach(dev); | |
1996 | ||
1997 | return 0; | |
1998 | } | |
1999 | #endif /* CONFIG_PM */ | |
2000 | ||
2001 | static struct pci_driver rhine_driver = { | |
2002 | .name = DRV_NAME, | |
2003 | .id_table = rhine_pci_tbl, | |
2004 | .probe = rhine_init_one, | |
2005 | .remove = __devexit_p(rhine_remove_one), | |
2006 | #ifdef CONFIG_PM | |
2007 | .suspend = rhine_suspend, | |
2008 | .resume = rhine_resume, | |
2009 | #endif /* CONFIG_PM */ | |
d18c3db5 | 2010 | .shutdown = rhine_shutdown, |
1da177e4 LT |
2011 | }; |
2012 | ||
e84df485 RL |
2013 | static struct dmi_system_id __initdata rhine_dmi_table[] = { |
2014 | { | |
2015 | .ident = "EPIA-M", | |
2016 | .matches = { | |
2017 | DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."), | |
2018 | DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"), | |
2019 | }, | |
2020 | }, | |
2021 | { | |
2022 | .ident = "KV7", | |
2023 | .matches = { | |
2024 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), | |
2025 | DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"), | |
2026 | }, | |
2027 | }, | |
2028 | { NULL } | |
2029 | }; | |
1da177e4 LT |
2030 | |
2031 | static int __init rhine_init(void) | |
2032 | { | |
2033 | /* When built as a module, this is printed whether or not devices are found in probe. */ | |
2034 | #ifdef MODULE | |
2035 | printk(version); | |
2036 | #endif | |
e84df485 RL |
2037 | if (dmi_check_system(rhine_dmi_table)) { |
2038 | /* these BIOSes fail at PXE boot if chip is in D3 */ | |
2039 | avoid_D3 = 1; | |
2040 | printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 " | |
2041 | "enabled.\n", | |
2042 | DRV_NAME); | |
2043 | } | |
2044 | else if (avoid_D3) | |
2045 | printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME); | |
2046 | ||
29917620 | 2047 | return pci_register_driver(&rhine_driver); |
1da177e4 LT |
2048 | } |
2049 | ||
2050 | ||
2051 | static void __exit rhine_cleanup(void) | |
2052 | { | |
2053 | pci_unregister_driver(&rhine_driver); | |
2054 | } | |
2055 | ||
2056 | ||
2057 | module_init(rhine_init); | |
2058 | module_exit(rhine_cleanup); |