net: via-rhine: constify of_device_id array
drivers/net/ethernet/via/via-rhine.c
1/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
17
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
21 Annapolis MD 21403
22
23
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
03a8c661 28 [link no longer provides useful info -jgarzik]
29
30*/
31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
1da177e4 34#define DRV_NAME "via-rhine"
207070f5 35#define DRV_VERSION "1.5.1"
38f49e88 36#define DRV_RELDATE "2010-10-09"
1da177e4 37
eb939922 38#include <linux/types.h>
39
40/* A few user-configurable values.
41 These may be modified when a driver module is loaded. */
42static int debug = 0;
43#define RHINE_MSG_DEFAULT \
44 (0x0000)
45
46/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47 Setting to > 1518 effectively disables this feature. */
48#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 defined(CONFIG_SPARC) || defined(__ia64__) || \
50 defined(__sh__) || defined(__mips__)
51static int rx_copybreak = 1518;
52#else
1da177e4 53static int rx_copybreak;
b47157f0 54#endif
1da177e4 55
56/* Work-around for broken BIOSes: they are unable to get the chip back out of
57 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
eb939922 58static bool avoid_D3;
b933b4d9 59
60/*
61 * In case you are looking for 'options[]' or 'full_duplex[]', they
62 * are gone. Use ethtool(8) instead.
63 */
64
65/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66 The Rhine has a 64 element 8390-like hash table. */
67static const int multicast_filter_limit = 32;
68
69
70/* Operational parameters that are set at compile time. */
71
72/* Keep the ring sizes a power of two for compile efficiency.
73 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74 * Making the Tx ring too large decreases the effectiveness of channel
75 * bonding and packet priority.
76 * With BQL support, we can increase TX ring safely.
77 * There are no ill effects from too-large receive rings.
78 */
79#define TX_RING_SIZE 64
80#define TX_QUEUE_LEN (TX_RING_SIZE - 6) /* Limit ring entries actually used. */
633949a1 81#define RX_RING_SIZE 64
82
83/* Operational parameters that usually are not changed. */
84
85/* Time in jiffies before concluding the transmitter is hung. */
86#define TX_TIMEOUT (2*HZ)
87
88#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
89
90#include <linux/module.h>
91#include <linux/moduleparam.h>
92#include <linux/kernel.h>
93#include <linux/string.h>
94#include <linux/timer.h>
95#include <linux/errno.h>
96#include <linux/ioport.h>
97#include <linux/interrupt.h>
98#include <linux/pci.h>
99#include <linux/of_address.h>
100#include <linux/of_device.h>
101#include <linux/of_irq.h>
102#include <linux/platform_device.h>
1e7f0bd8 103#include <linux/dma-mapping.h>
104#include <linux/netdevice.h>
105#include <linux/etherdevice.h>
106#include <linux/skbuff.h>
107#include <linux/init.h>
108#include <linux/delay.h>
109#include <linux/mii.h>
110#include <linux/ethtool.h>
111#include <linux/crc32.h>
38f49e88 112#include <linux/if_vlan.h>
1da177e4 113#include <linux/bitops.h>
c0d7a021 114#include <linux/workqueue.h>
115#include <asm/processor.h> /* Processor type for cache alignment. */
116#include <asm/io.h>
117#include <asm/irq.h>
118#include <asm/uaccess.h>
e84df485 119#include <linux/dmi.h>
120
121/* These identify the driver base version and may not be removed. */
76e239e1 122static const char version[] =
df4511fe 123 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
1da177e4 124
125MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
126MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
127MODULE_LICENSE("GPL");
128
129module_param(debug, int, 0);
130module_param(rx_copybreak, int, 0);
b933b4d9 131module_param(avoid_D3, bool, 0);
fc3e0f8a 132MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
1da177e4 133MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
b933b4d9 134MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
1da177e4 135
136#define MCAM_SIZE 32
137#define VCAM_SIZE 32
138
139/*
140 Theory of Operation
141
142I. Board Compatibility
143
144This driver is designed for the VIA 86c100A Rhine-I PCI Fast Ethernet
145controller.
146
147II. Board-specific settings
148
149Boards with this chip are functional only in a bus-master PCI slot.
150
151Many operational settings are loaded from the EEPROM to the Config word at
152offset 0x78. For most of these settings, this driver assumes that they are
153correct.
154If this driver is compiled to use PCI memory space operations the EEPROM
155must be configured to enable memory ops.
156
157III. Driver operation
158
159IIIa. Ring buffers
160
161This driver uses two statically allocated fixed-size descriptor lists
162formed into rings by a branch from the final descriptor to the beginning of
163the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
164
165IIIb/c. Transmit/Receive Structure
166
167This driver attempts to use a zero-copy receive and transmit scheme.
168
169Alas, all data buffers are required to start on a 32 bit boundary, so
170the driver must often copy transmit packets into bounce buffers.
171
172The driver allocates full frame size skbuffs for the Rx ring buffers at
173open() time and passes the skb->data field to the chip as receive data
174buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
175a fresh skbuff is allocated and the frame is copied to the new skbuff.
176When the incoming frame is larger, the skbuff is passed directly up the
177protocol stack. Buffers consumed this way are replaced by newly allocated
178skbuffs in the last phase of rhine_rx().
179
180The RX_COPYBREAK value is chosen to trade-off the memory wasted by
181using a full-sized skbuff for small frames vs. the copying costs of larger
182frames. New boards are typically used in generously configured machines
183and the underfilled buffers have negligible impact compared to the benefit of
184a single allocation size, so the default value of zero results in never
185copying packets. When copying is done, the cost is usually mitigated by using
186a combined copy/checksum routine. Copying also preloads the cache, which is
187most useful with small frames.
188
189Since the VIA chips are only able to transfer data to buffers on 32 bit
190boundaries, the IP header at offset 14 in an ethernet frame isn't
191longword aligned for further processing. Copying these unaligned buffers
192has the beneficial effect of 16-byte aligning the IP header.
193
194IIId. Synchronization
195
196The driver runs as two independent, single-threaded flows of control. One
197is the send-packet routine, which enforces single-threaded use by the
198netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
199which is single threaded by the hardware and interrupt handling software.
200
201The send packet thread has partial control over the Tx ring. It locks the
202netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
203the ring is not available it stops the transmit queue by
204calling netif_stop_queue.
205
206The interrupt handler has exclusive control over the Rx ring and records stats
207from the Tx ring. After reaping the stats, it marks the Tx queue entry as
208empty by incrementing the dirty_tx mark. If at least half of the entries in
209the Tx ring are available the transmit queue is woken up if it was stopped.
210
211IV. Notes
212
213IVb. References
214
215Preliminary VT86C100A manual from http://www.via.com.tw/
216http://www.scyld.com/expert/100mbps.html
217http://www.scyld.com/expert/NWay.html
218ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
219ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
220
221
222IVc. Errata
223
224The VT86C100A manual is not a reliable source of information.
225The 3043 chip does not handle unaligned transmit or receive buffers, resulting
226in significant performance degradation for bounce buffer copies on transmit
227and unaligned IP headers on receive.
228The chip does not pad to minimum transmit length.
229
230*/
231
232
233/* This table drives the PCI probe routines. It's mostly boilerplate in all
234 of the drivers, and will likely be provided by some future kernel.
235 Note the matching code -- the first table entry matches all 56** cards but
 236 the second only the 1234 card.
237*/
238
239enum rhine_revs {
240 VT86C100A = 0x00,
241 VTunknown0 = 0x20,
242 VT6102 = 0x40,
243 VT8231 = 0x50, /* Integrated MAC */
244 VT8233 = 0x60, /* Integrated MAC */
245 VT8235 = 0x74, /* Integrated MAC */
246 VT8237 = 0x78, /* Integrated MAC */
247 VTunknown1 = 0x7C,
248 VT6105 = 0x80,
249 VT6105_B0 = 0x83,
250 VT6105L = 0x8A,
251 VT6107 = 0x8C,
252 VTunknown2 = 0x8E,
253 VT6105M = 0x90, /* Management adapter */
254};
255
256enum rhine_quirks {
257 rqWOL = 0x0001, /* Wake-On-LAN support */
258 rqForceReset = 0x0002,
259 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
260 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
261 rqRhineI = 0x0100, /* See comment below */
262 rqIntPHY = 0x0200, /* Integrated PHY */
263 rqMgmt = 0x0400, /* Management adapter */
264 rqNeedEnMMIO = 0x0800, /* Whether the core needs to be
265 * switched from PIO mode to MMIO
266 * (only applies to PCI)
267 */
268};
269/*
270 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
271 * MMIO as well as for the collision counter and the Tx FIFO underflow
272 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
273 */
274
275/* Beware of PCI posted writes */
276#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
277
9baa3c34 278static const struct pci_device_id rhine_pci_tbl[] = {
279 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
280 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
281 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
282 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
283 { } /* terminate list */
284};
285MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
286
2d283862 287/* OpenFirmware identifiers for platform-bus devices
ca8b6e04 288 * The .data field is currently only used to store quirks
2d283862 289 */
ca8b6e04 290static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
d2b75a3f 291static const struct of_device_id rhine_of_tbl[] = {
ca8b6e04 292 { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
293 { } /* terminate list */
294};
295MODULE_DEVICE_TABLE(of, rhine_of_tbl);
296
297/* Offsets to the device registers. */
298enum register_offsets {
299 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
38f49e88 300 ChipCmd1=0x09, TQWake=0x0A,
301 IntrStatus=0x0C, IntrEnable=0x0E,
302 MulticastFilter0=0x10, MulticastFilter1=0x14,
303 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
38f49e88 304 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
305 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
306 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
307 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
308 StickyHW=0x83, IntrStatus2=0x84,
38f49e88 309 CamMask=0x88, CamCon=0x92, CamAddr=0x93,
310 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
311 WOLcrClr1=0xA6, WOLcgClr=0xA7,
312 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
313};
314
315/* Bits in ConfigD */
316enum backoff_bits {
317 BackOptional=0x01, BackModify=0x02,
318 BackCaptureEffect=0x04, BackRandom=0x08
319};
320
321/* Bits in the TxConfig (TCR) register */
322enum tcr_bits {
323 TCR_PQEN=0x01,
324 TCR_LB0=0x02, /* loopback[0] */
325 TCR_LB1=0x04, /* loopback[1] */
326 TCR_OFSET=0x08,
327 TCR_RTGOPT=0x10,
328 TCR_RTFT0=0x20,
329 TCR_RTFT1=0x40,
330 TCR_RTSF=0x80,
331};
332
333/* Bits in the CamCon (CAMC) register */
334enum camcon_bits {
335 CAMC_CAMEN=0x01,
336 CAMC_VCAMSL=0x02,
337 CAMC_CAMWR=0x04,
338 CAMC_CAMRD=0x08,
339};
340
341/* Bits in the PCIBusConfig1 (BCR1) register */
342enum bcr1_bits {
343 BCR1_POT0=0x01,
344 BCR1_POT1=0x02,
345 BCR1_POT2=0x04,
346 BCR1_CTFT0=0x08,
347 BCR1_CTFT1=0x10,
348 BCR1_CTSF=0x20,
349 BCR1_TXQNOBK=0x40, /* for VT6105 */
350 BCR1_VIDFR=0x80, /* for VT6105 */
351 BCR1_MED0=0x40, /* for VT6102 */
352 BCR1_MED1=0x80, /* for VT6102 */
353};
354
355/* Registers whose MMIO values we verify against their PIO counterparts. */
356static const int mmio_verify_registers[] = {
357 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
358 0
359};
360
361/* Bits in the interrupt status/mask registers. */
362enum intr_status_bits {
363 IntrRxDone = 0x0001,
364 IntrTxDone = 0x0002,
365 IntrRxErr = 0x0004,
366 IntrTxError = 0x0008,
367 IntrRxEmpty = 0x0020,
368 IntrPCIErr = 0x0040,
369 IntrStatsMax = 0x0080,
370 IntrRxEarly = 0x0100,
371 IntrTxUnderrun = 0x0210,
372 IntrRxOverflow = 0x0400,
373 IntrRxDropped = 0x0800,
374 IntrRxNoBuf = 0x1000,
375 IntrTxAborted = 0x2000,
376 IntrLinkChange = 0x4000,
377 IntrRxWakeUp = 0x8000,
378 IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */
379 IntrNormalSummary = IntrRxDone | IntrTxDone,
380 IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
381 IntrTxUnderrun,
382};
383
384/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
385enum wol_bits {
386 WOLucast = 0x10,
387 WOLmagic = 0x20,
388 WOLbmcast = 0x30,
389 WOLlnkon = 0x40,
390 WOLlnkoff = 0x80,
391};
392
393/* The Rx and Tx buffer descriptors. */
394struct rx_desc {
395 __le32 rx_status;
396 __le32 desc_length; /* Chain flag, Buffer/frame length */
397 __le32 addr;
398 __le32 next_desc;
399};
400struct tx_desc {
401 __le32 tx_status;
402 __le32 desc_length; /* Chain flag, Tx Config, Frame length */
403 __le32 addr;
404 __le32 next_desc;
405};
406
407/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
408#define TXDESC 0x00e08000
409
410enum rx_status_bits {
411 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
412};
413
414/* Bits in *_desc.*_status */
415enum desc_status_bits {
416 DescOwn=0x80000000
417};
418
419/* Bits in *_desc.*_length */
420enum desc_length_bits {
421 DescTag=0x00010000
422};
423
424/* Bits in ChipCmd. */
425enum chip_cmd_bits {
426 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
427 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
428 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
429 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
430};
431
432struct rhine_stats {
433 u64 packets;
434 u64 bytes;
435 struct u64_stats_sync syncp;
436};
437
1da177e4 438struct rhine_private {
439 /* Bit mask for configured VLAN ids */
440 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
441
442 /* Descriptor rings */
443 struct rx_desc *rx_ring;
444 struct tx_desc *tx_ring;
445 dma_addr_t rx_ring_dma;
446 dma_addr_t tx_ring_dma;
447
448 /* The addresses of receive-in-place skbuffs. */
449 struct sk_buff *rx_skbuff[RX_RING_SIZE];
450 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
451
452 /* The saved address of a sent-in-place packet/buffer, for later free(). */
453 struct sk_buff *tx_skbuff[TX_RING_SIZE];
454 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
455
4be5de25 456 /* Tx bounce buffers (Rhine-I only) */
457 unsigned char *tx_buf[TX_RING_SIZE];
458 unsigned char *tx_bufs;
459 dma_addr_t tx_bufs_dma;
460
f7630d18 461 int irq;
1da177e4 462 long pioaddr;
463 struct net_device *dev;
464 struct napi_struct napi;
1da177e4 465 spinlock_t lock;
466 struct mutex task_lock;
467 bool task_enable;
468 struct work_struct slow_event_task;
c0d7a021 469 struct work_struct reset_task;
1da177e4 470
471 u32 msg_enable;
472
473 /* Frequently used values: keep some adjacent for cache effect. */
474 u32 quirks;
475 struct rx_desc *rx_head_desc;
476 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
477 unsigned int cur_tx, dirty_tx;
478 unsigned int rx_buf_sz; /* Based on MTU+slack. */
479 struct rhine_stats rx_stats;
480 struct rhine_stats tx_stats;
481 u8 wolopts;
482
483 u8 tx_thresh, rx_thresh;
484
485 struct mii_if_info mii_if;
486 void __iomem *base;
487};
488
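/* Read-modify-write helpers for byte/word/dword MMIO registers: *_ON sets the
 * bits in x, *_OFF clears them, *_IS_ON tests them, and *_SET replaces the
 * bits covered by mask m with x. */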
489#define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
490#define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
491#define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
492
493#define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
494#define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
495#define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
496
497#define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
498#define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
499#define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
500
501#define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
502#define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
503#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
504
505
506static int mdio_read(struct net_device *dev, int phy_id, int location);
507static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
508static int rhine_open(struct net_device *dev);
c0d7a021 509static void rhine_reset_task(struct work_struct *work);
7ab87ff4 510static void rhine_slow_event_task(struct work_struct *work);
1da177e4 511static void rhine_tx_timeout(struct net_device *dev);
512static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
513 struct net_device *dev);
7d12e780 514static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
1da177e4 515static void rhine_tx(struct net_device *dev);
633949a1 516static int rhine_rx(struct net_device *dev, int limit);
1da177e4 517static void rhine_set_rx_mode(struct net_device *dev);
518static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
519 struct rtnl_link_stats64 *stats);
1da177e4 520static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
7282d491 521static const struct ethtool_ops netdev_ethtool_ops;
1da177e4 522static int rhine_close(struct net_device *dev);
523static int rhine_vlan_rx_add_vid(struct net_device *dev,
524 __be16 proto, u16 vid);
525static int rhine_vlan_rx_kill_vid(struct net_device *dev,
526 __be16 proto, u16 vid);
7ab87ff4 527static void rhine_restart_tx(struct net_device *dev);
1da177e4 528
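/* Busy-wait (at most 1024 polls, 10 us apart) for the masked bits of an MMIO
 * register to go high (low == false) or low (low == true). */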
3f8c91a7 529static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
530{
531 void __iomem *ioaddr = rp->base;
532 int i;
533
534 for (i = 0; i < 1024; i++) {
535 bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
536
537 if (low ^ has_mask_bits)
538 break;
539 udelay(10);
540 }
541 if (i > 64) {
fc3e0f8a 542 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
3f8c91a7 543 "count: %04d\n", low ? "low" : "high", reg, mask, i);
544 }
545}
546
547static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
548{
3f8c91a7 549 rhine_wait_bit(rp, reg, mask, false);
550}
551
552static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
553{
3f8c91a7 554 rhine_wait_bit(rp, reg, mask, true);
a384a33b 555}
1da177e4 556
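/* Collect pending interrupt events; chips with the Tx write-back race quirk
 * report extra status in IntrStatus2, mapped here into bits 16-23. */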
a20a28bc 557static u32 rhine_get_events(struct rhine_private *rp)
1da177e4 558{
559 void __iomem *ioaddr = rp->base;
560 u32 intr_status;
561
562 intr_status = ioread16(ioaddr + IntrStatus);
563 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
564 if (rp->quirks & rqStatusWBRace)
565 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
566 return intr_status;
567}
568
569static void rhine_ack_events(struct rhine_private *rp, u32 mask)
570{
571 void __iomem *ioaddr = rp->base;
572
573 if (rp->quirks & rqStatusWBRace)
574 iowrite8(mask >> 16, ioaddr + IntrStatus2);
575 iowrite16(mask, ioaddr + IntrStatus);
7ab87ff4 576 mmiowb();
577}
578
579/*
580 * Get power related registers into sane state.
581 * Notify user about past WOL event.
582 */
583static void rhine_power_init(struct net_device *dev)
584{
585 struct rhine_private *rp = netdev_priv(dev);
586 void __iomem *ioaddr = rp->base;
587 u16 wolstat;
588
589 if (rp->quirks & rqWOL) {
590 /* Make sure chip is in power state D0 */
591 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
592
593 /* Disable "force PME-enable" */
594 iowrite8(0x80, ioaddr + WOLcgClr);
595
596 /* Clear power-event config bits (WOL) */
597 iowrite8(0xFF, ioaddr + WOLcrClr);
598 /* More recent cards can manage two additional patterns */
599 if (rp->quirks & rq6patterns)
600 iowrite8(0x03, ioaddr + WOLcrClr1);
601
602 /* Save power-event status bits */
603 wolstat = ioread8(ioaddr + PwrcsrSet);
604 if (rp->quirks & rq6patterns)
605 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
606
607 /* Clear power-event status bits */
608 iowrite8(0xFF, ioaddr + PwrcsrClr);
609 if (rp->quirks & rq6patterns)
610 iowrite8(0x03, ioaddr + PwrcsrClr1);
611
612 if (wolstat) {
613 char *reason;
614 switch (wolstat) {
615 case WOLmagic:
616 reason = "Magic packet";
617 break;
618 case WOLlnkon:
619 reason = "Link went up";
620 break;
621 case WOLlnkoff:
622 reason = "Link went down";
623 break;
624 case WOLucast:
625 reason = "Unicast packet";
626 break;
627 case WOLbmcast:
628 reason = "Multicast/broadcast packet";
629 break;
630 default:
631 reason = "Unknown";
632 }
633 netdev_info(dev, "Woke system up. Reason: %s\n",
634 reason);
635 }
636 }
637}
638
639static void rhine_chip_reset(struct net_device *dev)
640{
641 struct rhine_private *rp = netdev_priv(dev);
642 void __iomem *ioaddr = rp->base;
fc3e0f8a 643 u8 cmd1;
644
645 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
646 IOSYNC;
647
648 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
df4511fe 649 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
650
651 /* Force reset */
652 if (rp->quirks & rqForceReset)
653 iowrite8(0x40, ioaddr + MiscCmd);
654
655 /* Reset can take somewhat longer (rare) */
a384a33b 656 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
657 }
658
659 cmd1 = ioread8(ioaddr + ChipCmd1);
660 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
661 "failed" : "succeeded");
662}
663
664static void enable_mmio(long pioaddr, u32 quirks)
665{
666 int n;
667
668 if (quirks & rqNeedEnMMIO) {
669 if (quirks & rqRhineI) {
670 /* More recent docs say that this bit is reserved */
671 n = inb(pioaddr + ConfigA) | 0x20;
672 outb(n, pioaddr + ConfigA);
673 } else {
674 n = inb(pioaddr + ConfigD) | 0x80;
675 outb(n, pioaddr + ConfigD);
676 }
677 }
678}
679
680static inline int verify_mmio(struct device *hwdev,
681 long pioaddr,
682 void __iomem *ioaddr,
683 u32 quirks)
684{
685 if (quirks & rqNeedEnMMIO) {
686 int i = 0;
687
688 /* Check that selected MMIO registers match the PIO ones */
689 while (mmio_verify_registers[i]) {
690 int reg = mmio_verify_registers[i++];
691 unsigned char a = inb(pioaddr+reg);
692 unsigned char b = readb(ioaddr+reg);
693
694 if (a != b) {
695 dev_err(hwdev,
696 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
697 reg, a, b);
698 return -EIO;
699 }
700 }
701 }
702 return 0;
703}
704
705/*
706 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
707 * (plus 0x6C for Rhine-I/II)
708 */
76e239e1 709static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
710{
711 struct rhine_private *rp = netdev_priv(dev);
712 void __iomem *ioaddr = rp->base;
a384a33b 713 int i;
714
715 outb(0x20, pioaddr + MACRegEEcsr);
716 for (i = 0; i < 1024; i++) {
717 if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
718 break;
719 }
720 if (i > 512)
721 pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
1da177e4 722
723 /*
724 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
725 * MMIO. If reloading EEPROM was done first this could be avoided, but
726 * it is not known if that still works with the "win98-reboot" problem.
727 */
728 enable_mmio(pioaddr, rp->quirks);
729
730 /* Turn off EEPROM-controlled wake-up (magic packet) */
731 if (rp->quirks & rqWOL)
732 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
733
734}
735
736#ifdef CONFIG_NET_POLL_CONTROLLER
737static void rhine_poll(struct net_device *dev)
738{
05d334ec 739 struct rhine_private *rp = netdev_priv(dev);
f7630d18 740 const int irq = rp->irq;
741
742 disable_irq(irq);
743 rhine_interrupt(irq, dev);
744 enable_irq(irq);
745}
746#endif
747
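/* After a Tx underrun, raise the Tx FIFO threshold one step (0x20) up to the
 * hardware maximum of 0xe0. */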
748static void rhine_kick_tx_threshold(struct rhine_private *rp)
749{
750 if (rp->tx_thresh < 0xe0) {
751 void __iomem *ioaddr = rp->base;
752
753 rp->tx_thresh += 0x20;
754 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
755 }
756}
757
758static void rhine_tx_err(struct rhine_private *rp, u32 status)
759{
760 struct net_device *dev = rp->dev;
761
762 if (status & IntrTxAborted) {
763 netif_info(rp, tx_err, dev,
764 "Abort %08x, frame dropped\n", status);
765 }
766
767 if (status & IntrTxUnderrun) {
768 rhine_kick_tx_threshold(rp);
769 netif_info(rp, tx_err, dev, "Transmitter underrun, "
770 "Tx threshold now %02x\n", rp->tx_thresh);
771 }
772
773 if (status & IntrTxDescRace)
774 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
775
776 if ((status & IntrTxError) &&
777 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
778 rhine_kick_tx_threshold(rp);
779 netif_info(rp, tx_err, dev, "Unspecified error. "
780 "Tx threshold now %02x\n", rp->tx_thresh);
781 }
782
783 rhine_restart_tx(dev);
784}
785
786static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
787{
788 void __iomem *ioaddr = rp->base;
789 struct net_device_stats *stats = &rp->dev->stats;
790
791 stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
792 stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
793
794 /*
795 * Clears the "tally counters" for CRC errors and missed frames(?).
796 * It has been reported that some chips need a write of 0 to clear
797 * these, for others the counters are set to 1 when written to and
798 * instead cleared when read. So we clear them both ways ...
799 */
800 iowrite32(0, ioaddr + RxMissed);
801 ioread16(ioaddr + RxCRCErrs);
802 ioread16(ioaddr + RxMissed);
803}
804
805#define RHINE_EVENT_NAPI_RX (IntrRxDone | \
806 IntrRxErr | \
807 IntrRxEmpty | \
808 IntrRxOverflow | \
809 IntrRxDropped | \
810 IntrRxNoBuf | \
811 IntrRxWakeUp)
812
813#define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
814 IntrTxAborted | \
815 IntrTxUnderrun | \
816 IntrTxDescRace)
817#define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
818
819#define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
820 RHINE_EVENT_NAPI_TX | \
821 IntrStatsMax)
822#define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
823#define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
824
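/* NAPI poll handler: acknowledge and process Rx/Tx events, defer slow events
 * (link change, PCI error) to slow_event_task, and re-enable interrupts once
 * the budget is no longer exhausted. */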
bea3348e 825static int rhine_napipoll(struct napi_struct *napi, int budget)
633949a1 826{
827 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
828 struct net_device *dev = rp->dev;
633949a1 829 void __iomem *ioaddr = rp->base;
830 u16 enable_mask = RHINE_EVENT & 0xffff;
831 int work_done = 0;
832 u32 status;
833
834 status = rhine_get_events(rp);
835 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
836
837 if (status & RHINE_EVENT_NAPI_RX)
838 work_done += rhine_rx(dev, budget);
839
840 if (status & RHINE_EVENT_NAPI_TX) {
841 if (status & RHINE_EVENT_NAPI_TX_ERR) {
7ab87ff4 842 /* Avoid scavenging before Tx engine turned off */
a384a33b 843 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
844 if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
845 netif_warn(rp, tx_err, dev, "Tx still on\n");
7ab87ff4 846 }
fc3e0f8a 847
848 rhine_tx(dev);
849
850 if (status & RHINE_EVENT_NAPI_TX_ERR)
851 rhine_tx_err(rp, status);
852 }
853
854 if (status & IntrStatsMax) {
855 spin_lock(&rp->lock);
856 rhine_update_rx_crc_and_missed_errord(rp);
857 spin_unlock(&rp->lock);
858 }
633949a1 859
860 if (status & RHINE_EVENT_SLOW) {
861 enable_mask &= ~RHINE_EVENT_SLOW;
862 schedule_work(&rp->slow_event_task);
863 }
633949a1 864
bea3348e 865 if (work_done < budget) {
288379f0 866 napi_complete(napi);
867 iowrite16(enable_mask, ioaddr + IntrEnable);
868 mmiowb();
633949a1 869 }
bea3348e 870 return work_done;
633949a1 871}
633949a1 872
76e239e1 873static void rhine_hw_init(struct net_device *dev, long pioaddr)
874{
875 struct rhine_private *rp = netdev_priv(dev);
876
877 /* Reset the chip to erase previous misconfiguration. */
878 rhine_chip_reset(dev);
879
880 /* Rhine-I needs extra time to recuperate before EEPROM reload */
881 if (rp->quirks & rqRhineI)
882 msleep(5);
883
884 /* Reload EEPROM controlled bytes cleared by soft reset */
885 if (dev_is_pci(dev->dev.parent))
886 rhine_reload_eeprom(pioaddr, dev);
887}
888
889static const struct net_device_ops rhine_netdev_ops = {
890 .ndo_open = rhine_open,
891 .ndo_stop = rhine_close,
892 .ndo_start_xmit = rhine_start_tx,
f7b5d1b9 893 .ndo_get_stats64 = rhine_get_stats64,
afc4b13d 894 .ndo_set_rx_mode = rhine_set_rx_mode,
635ecaa7 895 .ndo_change_mtu = eth_change_mtu,
5d1d07d8 896 .ndo_validate_addr = eth_validate_addr,
fe96aaa1 897 .ndo_set_mac_address = eth_mac_addr,
898 .ndo_do_ioctl = netdev_ioctl,
899 .ndo_tx_timeout = rhine_tx_timeout,
900 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
901 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
902#ifdef CONFIG_NET_POLL_CONTROLLER
903 .ndo_poll_controller = rhine_poll,
904#endif
905};
906
ca8b6e04 907static int rhine_init_one_common(struct device *hwdev, u32 quirks,
2d283862 908 long pioaddr, void __iomem *ioaddr, int irq)
909{
910 struct net_device *dev;
911 struct rhine_private *rp;
2d283862 912 int i, rc, phy_id;
1da177e4 913 const char *name;
914
915 /* this should always be supported */
f7630d18 916 rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
1da177e4 917 if (rc) {
f7630d18 918 dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
2d283862 919 goto err_out;
920 }
921
922 dev = alloc_etherdev(sizeof(struct rhine_private));
923 if (!dev) {
924 rc = -ENOMEM;
2d283862 925 goto err_out;
1da177e4 926 }
f7630d18 927 SET_NETDEV_DEV(dev, hwdev);
928
929 rp = netdev_priv(dev);
bea3348e 930 rp->dev = dev;
ca8b6e04 931 rp->quirks = quirks;
1da177e4 932 rp->pioaddr = pioaddr;
933 rp->base = ioaddr;
934 rp->irq = irq;
fc3e0f8a 935 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
1da177e4 936
ca8b6e04 937 phy_id = rp->quirks & rqIntPHY ? 1 : 0;
1da177e4 938
939 u64_stats_init(&rp->tx_stats.syncp);
940 u64_stats_init(&rp->rx_stats.syncp);
941
942 /* Get chip registers into a sane state */
943 rhine_power_init(dev);
944 rhine_hw_init(dev, pioaddr);
945
946 for (i = 0; i < 6; i++)
947 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
948
949 if (!is_valid_ether_addr(dev->dev_addr)) {
950 /* Report it and use a random ethernet address instead */
951 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
f2cedb63 952 eth_hw_addr_random(dev);
953 netdev_info(dev, "Using random MAC address: %pM\n",
954 dev->dev_addr);
955 }
956
957 /* For Rhine-I/II, phy_id is loaded from EEPROM */
958 if (!phy_id)
959 phy_id = ioread8(ioaddr + 0x6C);
960
1da177e4 961 spin_lock_init(&rp->lock);
7ab87ff4 962 mutex_init(&rp->task_lock);
c0d7a021 963 INIT_WORK(&rp->reset_task, rhine_reset_task);
7ab87ff4 964 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
c0d7a021 965
966 rp->mii_if.dev = dev;
967 rp->mii_if.mdio_read = mdio_read;
968 rp->mii_if.mdio_write = mdio_write;
969 rp->mii_if.phy_id_mask = 0x1f;
970 rp->mii_if.reg_num_mask = 0x1f;
971
972 /* The chip-specific entries in the device structure. */
5d1d07d8 973 dev->netdev_ops = &rhine_netdev_ops;
e76070f2 974 dev->ethtool_ops = &netdev_ethtool_ops;
1da177e4 975 dev->watchdog_timeo = TX_TIMEOUT;
5d1d07d8 976
bea3348e 977 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
32b0f53e 978
979 if (rp->quirks & rqRhineI)
980 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
981
ca8b6e04 982 if (rp->quirks & rqMgmt)
983 dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
984 NETIF_F_HW_VLAN_CTAG_RX |
985 NETIF_F_HW_VLAN_CTAG_FILTER;
38f49e88 986
987 /* dev->name not defined before register_netdev()! */
988 rc = register_netdev(dev);
989 if (rc)
2d283862 990 goto err_out_free_netdev;
1da177e4 991
992 if (rp->quirks & rqRhineI)
993 name = "Rhine";
994 else if (rp->quirks & rqStatusWBRace)
995 name = "Rhine II";
996 else if (rp->quirks & rqMgmt)
997 name = "Rhine III (Management Adapter)";
998 else
999 name = "Rhine III";
1000
df4511fe 1001 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
2d283862 1002 name, (long)ioaddr, dev->dev_addr, rp->irq);
1da177e4 1003
f7630d18 1004 dev_set_drvdata(hwdev, dev);
1005
1006 {
1007 u16 mii_cmd;
1008 int mii_status = mdio_read(dev, phy_id, 1);
1009 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1010 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1011 if (mii_status != 0xffff && mii_status != 0x0000) {
1012 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1013 netdev_info(dev,
1014 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1015 phy_id,
1016 mii_status, rp->mii_if.advertising,
1017 mdio_read(dev, phy_id, 5));
1018
1019 /* set IFF_RUNNING */
1020 if (mii_status & BMSR_LSTATUS)
1021 netif_carrier_on(dev);
1022 else
1023 netif_carrier_off(dev);
1024
1025 }
1026 }
1027 rp->mii_if.phy_id = phy_id;
1028 if (avoid_D3)
1029 netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1030
1031 return 0;
1032
1033err_out_free_netdev:
1034 free_netdev(dev);
1035err_out:
1036 return rc;
1037}
1038
1039static int rhine_init_one_pci(struct pci_dev *pdev,
1040 const struct pci_device_id *ent)
1041{
1042 struct device *hwdev = &pdev->dev;
5b579e21 1043 int rc;
1044 long pioaddr, memaddr;
1045 void __iomem *ioaddr;
1046 int io_size = pdev->revision < VTunknown0 ? 128 : 256;
1047
1048/* This driver was written to use PCI memory space. Some early versions
1049 * of the Rhine may only work correctly with I/O space accesses.
1050 * TODO: determine for which revisions this is true and assign the flag
1051 * in code as opposed to this Kconfig option (???)
1052 */
1053#ifdef CONFIG_VIA_RHINE_MMIO
1054 u32 quirks = rqNeedEnMMIO;
2d283862 1055#else
5b579e21 1056 u32 quirks = 0;
1057#endif
1058
1059/* when built into the kernel, we only print version if device is found */
1060#ifndef MODULE
1061 pr_info_once("%s\n", version);
1062#endif
1063
1064 rc = pci_enable_device(pdev);
1065 if (rc)
1066 goto err_out;
1067
ca8b6e04 1068 if (pdev->revision < VTunknown0) {
5b579e21 1069 quirks |= rqRhineI;
ca8b6e04 1070 } else if (pdev->revision >= VT6102) {
5b579e21 1071 quirks |= rqWOL | rqForceReset;
1072 if (pdev->revision < VT6105) {
1073 quirks |= rqStatusWBRace;
1074 } else {
1075 quirks |= rqIntPHY;
1076 if (pdev->revision >= VT6105_B0)
1077 quirks |= rq6patterns;
1078 if (pdev->revision >= VT6105M)
1079 quirks |= rqMgmt;
1080 }
1081 }
1082
1083 /* sanity check */
1084 if ((pci_resource_len(pdev, 0) < io_size) ||
1085 (pci_resource_len(pdev, 1) < io_size)) {
1086 rc = -EIO;
1087 dev_err(hwdev, "Insufficient PCI resources, aborting\n");
1088 goto err_out_pci_disable;
1089 }
1090
1091 pioaddr = pci_resource_start(pdev, 0);
1092 memaddr = pci_resource_start(pdev, 1);
1093
1094 pci_set_master(pdev);
1095
1096 rc = pci_request_regions(pdev, DRV_NAME);
1097 if (rc)
1098 goto err_out_pci_disable;
1099
5b579e21 1100 ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
1101 if (!ioaddr) {
1102 rc = -EIO;
1103 dev_err(hwdev,
1104 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
1105 dev_name(hwdev), io_size, memaddr);
1106 goto err_out_free_res;
1107 }
1108
1109 enable_mmio(pioaddr, quirks);
1110
1111 rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
1112 if (rc)
1113 goto err_out_unmap;
2d283862 1114
ca8b6e04 1115 rc = rhine_init_one_common(&pdev->dev, quirks,
1116 pioaddr, ioaddr, pdev->irq);
1117 if (!rc)
1118 return 0;
1119
1120err_out_unmap:
1121 pci_iounmap(pdev, ioaddr);
1122err_out_free_res:
1123 pci_release_regions(pdev);
1124err_out_pci_disable:
1125 pci_disable_device(pdev);
1126err_out:
1127 return rc;
1128}
1129
1130static int rhine_init_one_platform(struct platform_device *pdev)
1131{
1132 const struct of_device_id *match;
ca8b6e04 1133 const u32 *quirks;
1134 int irq;
1135 struct resource *res;
1136 void __iomem *ioaddr;
1137
1138 match = of_match_device(rhine_of_tbl, &pdev->dev);
1139 if (!match)
1140 return -EINVAL;
1141
1142 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1143 ioaddr = devm_ioremap_resource(&pdev->dev, res);
1144 if (IS_ERR(ioaddr))
1145 return PTR_ERR(ioaddr);
1146
1147 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1148 if (!irq)
1149 return -EINVAL;
1150
1151 quirks = match->data;
1152 if (!quirks)
1153 return -EINVAL;
1154
ca8b6e04 1155 return rhine_init_one_common(&pdev->dev, *quirks,
1156 (long)ioaddr, ioaddr, irq);
1157}
1158
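/* Allocate the Rx and Tx descriptor rings in one coherent DMA block;
 * Rhine-I chips additionally get PKT_BUF_SZ Tx bounce buffers. */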
1159static int alloc_ring(struct net_device* dev)
1160{
1161 struct rhine_private *rp = netdev_priv(dev);
f7630d18 1162 struct device *hwdev = dev->dev.parent;
1163 void *ring;
1164 dma_addr_t ring_dma;
1165
f7630d18 1166 ring = dma_alloc_coherent(hwdev,
1167 RX_RING_SIZE * sizeof(struct rx_desc) +
1168 TX_RING_SIZE * sizeof(struct tx_desc),
1169 &ring_dma,
1170 GFP_ATOMIC);
1da177e4 1171 if (!ring) {
df4511fe 1172 netdev_err(dev, "Could not allocate DMA memory\n");
1173 return -ENOMEM;
1174 }
1175 if (rp->quirks & rqRhineI) {
f7630d18 1176 rp->tx_bufs = dma_alloc_coherent(hwdev,
1177 PKT_BUF_SZ * TX_RING_SIZE,
1178 &rp->tx_bufs_dma,
1179 GFP_ATOMIC);
1da177e4 1180 if (rp->tx_bufs == NULL) {
f7630d18 1181 dma_free_coherent(hwdev,
1182 RX_RING_SIZE * sizeof(struct rx_desc) +
1183 TX_RING_SIZE * sizeof(struct tx_desc),
1184 ring, ring_dma);
1185 return -ENOMEM;
1186 }
1187 }
1188
1189 rp->rx_ring = ring;
1190 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1191 rp->rx_ring_dma = ring_dma;
1192 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1193
1194 return 0;
1195}
1196
1197static void free_ring(struct net_device* dev)
1198{
1199 struct rhine_private *rp = netdev_priv(dev);
f7630d18 1200 struct device *hwdev = dev->dev.parent;
1da177e4 1201
f7630d18 1202 dma_free_coherent(hwdev,
1203 RX_RING_SIZE * sizeof(struct rx_desc) +
1204 TX_RING_SIZE * sizeof(struct tx_desc),
1205 rp->rx_ring, rp->rx_ring_dma);
1206 rp->tx_ring = NULL;
1207
1208 if (rp->tx_bufs)
f7630d18 1209 dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
4087c4dc 1210 rp->tx_bufs, rp->tx_bufs_dma);
1211
1212 rp->tx_bufs = NULL;
1213
1214}
1215
1216static void alloc_rbufs(struct net_device *dev)
1217{
1218 struct rhine_private *rp = netdev_priv(dev);
f7630d18 1219 struct device *hwdev = dev->dev.parent;
1220 dma_addr_t next;
1221 int i;
1222
1223 rp->dirty_rx = rp->cur_rx = 0;
1224
1225 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1226 rp->rx_head_desc = &rp->rx_ring[0];
1227 next = rp->rx_ring_dma;
1228
1229 /* Init the ring entries */
1230 for (i = 0; i < RX_RING_SIZE; i++) {
1231 rp->rx_ring[i].rx_status = 0;
1232 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1233 next += sizeof(struct rx_desc);
1234 rp->rx_ring[i].next_desc = cpu_to_le32(next);
1235 rp->rx_skbuff[i] = NULL;
1236 }
1237 /* Mark the last entry as wrapping the ring. */
1238 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1239
1240 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1241 for (i = 0; i < RX_RING_SIZE; i++) {
b26b555a 1242 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1243 rp->rx_skbuff[i] = skb;
1244 if (skb == NULL)
1245 break;
1246
1247 rp->rx_skbuff_dma[i] =
f7630d18 1248 dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
4087c4dc 1249 DMA_FROM_DEVICE);
f7630d18 1250 if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
1251 rp->rx_skbuff_dma[i] = 0;
1252 dev_kfree_skb(skb);
1253 break;
1254 }
1255 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1256 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1257 }
1258 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1259}
1260
1261static void free_rbufs(struct net_device* dev)
1262{
1263 struct rhine_private *rp = netdev_priv(dev);
f7630d18 1264 struct device *hwdev = dev->dev.parent;
1265 int i;
1266
1267 /* Free all the skbuffs in the Rx queue. */
1268 for (i = 0; i < RX_RING_SIZE; i++) {
1269 rp->rx_ring[i].rx_status = 0;
1270 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1271 if (rp->rx_skbuff[i]) {
f7630d18 1272 dma_unmap_single(hwdev,
1da177e4 1273 rp->rx_skbuff_dma[i],
4087c4dc 1274 rp->rx_buf_sz, DMA_FROM_DEVICE);
1275 dev_kfree_skb(rp->rx_skbuff[i]);
1276 }
1277 rp->rx_skbuff[i] = NULL;
1278 }
1279}
1280
1281static void alloc_tbufs(struct net_device* dev)
1282{
1283 struct rhine_private *rp = netdev_priv(dev);
1284 dma_addr_t next;
1285 int i;
1286
1287 rp->dirty_tx = rp->cur_tx = 0;
1288 next = rp->tx_ring_dma;
1289 for (i = 0; i < TX_RING_SIZE; i++) {
1290 rp->tx_skbuff[i] = NULL;
1291 rp->tx_ring[i].tx_status = 0;
1292 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1293 next += sizeof(struct tx_desc);
1294 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1295 if (rp->quirks & rqRhineI)
1296 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1297 }
1298 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1299
92bf2008 1300 netdev_reset_queue(dev);
1301}
1302
1303static void free_tbufs(struct net_device* dev)
1304{
1305 struct rhine_private *rp = netdev_priv(dev);
f7630d18 1306 struct device *hwdev = dev->dev.parent;
1307 int i;
1308
1309 for (i = 0; i < TX_RING_SIZE; i++) {
1310 rp->tx_ring[i].tx_status = 0;
1311 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1312 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1313 if (rp->tx_skbuff[i]) {
1314 if (rp->tx_skbuff_dma[i]) {
f7630d18 1315 dma_unmap_single(hwdev,
1316 rp->tx_skbuff_dma[i],
1317 rp->tx_skbuff[i]->len,
4087c4dc 1318 DMA_TO_DEVICE);
1319 }
1320 dev_kfree_skb(rp->tx_skbuff[i]);
1321 }
1322 rp->tx_skbuff[i] = NULL;
1323 rp->tx_buf[i] = NULL;
1324 }
1325}
1326
1327static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1328{
1329 struct rhine_private *rp = netdev_priv(dev);
1330 void __iomem *ioaddr = rp->base;
1331
1332 if (!rp->mii_if.force_media)
1333 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1334
1335 if (rp->mii_if.full_duplex)
1336 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1337 ioaddr + ChipCmd1);
1338 else
1339 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1340 ioaddr + ChipCmd1);
1341
1342 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1343 rp->mii_if.force_media, netif_carrier_ok(dev));
1344}
1345
1346/* Called after status of force_media possibly changed */
0761be4f 1347static void rhine_set_carrier(struct mii_if_info *mii)
00b428c2 1348{
1349 struct net_device *dev = mii->dev;
1350 struct rhine_private *rp = netdev_priv(dev);
1351
1352 if (mii->force_media) {
1353 /* autoneg is off: Link is always assumed to be up */
1354 if (!netif_carrier_ok(dev))
1355 netif_carrier_on(dev);
1356 }
1357
1358 rhine_check_media(dev, 0);
1359
1360 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1361 mii->force_media, netif_carrier_ok(dev));
1362}
1363
1364/**
1365 * rhine_set_cam - set CAM multicast filters
1366 * @ioaddr: register block of this Rhine
1367 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1368 * @addr: multicast address (6 bytes)
1369 *
1370 * Load addresses into multicast filters.
1371 */
1372static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1373{
1374 int i;
1375
1376 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1377 wmb();
1378
1379 /* Paranoid -- idx out of range should never happen */
1380 idx &= (MCAM_SIZE - 1);
1381
1382 iowrite8((u8) idx, ioaddr + CamAddr);
1383
1384 for (i = 0; i < 6; i++, addr++)
1385 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1386 udelay(10);
1387 wmb();
1388
1389 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1390 udelay(10);
1391
1392 iowrite8(0, ioaddr + CamCon);
1393}
1394
1395/**
1396 * rhine_set_vlan_cam - set CAM VLAN filters
1397 * @ioaddr: register block of this Rhine
1398 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1399 * @addr: VLAN ID (2 bytes)
1400 *
1401 * Load addresses into VLAN filters.
1402 */
1403static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1404{
1405 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1406 wmb();
1407
1408 /* Paranoid -- idx out of range should never happen */
1409 idx &= (VCAM_SIZE - 1);
1410
1411 iowrite8((u8) idx, ioaddr + CamAddr);
1412
1413 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1414 udelay(10);
1415 wmb();
1416
1417 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1418 udelay(10);
1419
1420 iowrite8(0, ioaddr + CamCon);
1421}
1422
1423/**
1424 * rhine_set_cam_mask - set multicast CAM mask
1425 * @ioaddr: register block of this Rhine
1426 * @mask: multicast CAM mask
1427 *
1428 * Mask sets multicast filters active/inactive.
1429 */
1430static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1431{
1432 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1433 wmb();
1434
1435 /* write mask */
1436 iowrite32(mask, ioaddr + CamMask);
1437
1438 /* disable CAMEN */
1439 iowrite8(0, ioaddr + CamCon);
1440}
1441
1442/**
1443 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1444 * @ioaddr: register block of this Rhine
1445 * @mask: VLAN CAM mask
1446 *
1447 * Mask sets VLAN filters active/inactive.
1448 */
1449static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1450{
1451 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1452 wmb();
1453
1454 /* write mask */
1455 iowrite32(mask, ioaddr + CamMask);
1456
1457 /* disable CAMEN */
1458 iowrite8(0, ioaddr + CamCon);
1459}
1460
1461/**
1462 * rhine_init_cam_filter - initialize CAM filters
1463 * @dev: network device
1464 *
1465 * Initialize (disable) hardware VLAN and multicast support on this
1466 * Rhine.
1467 */
1468static void rhine_init_cam_filter(struct net_device *dev)
1469{
1470 struct rhine_private *rp = netdev_priv(dev);
1471 void __iomem *ioaddr = rp->base;
1472
1473 /* Disable all CAMs */
1474 rhine_set_vlan_cam_mask(ioaddr, 0);
1475 rhine_set_cam_mask(ioaddr, 0);
1476
1477 /* disable hardware VLAN support */
1478 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1479 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1480}
1481
1482/**
1483 * rhine_update_vcam - update VLAN CAM filters
1484 * @dev: network device whose VLAN filter configuration changed
1485 *
1486 * Update VLAN CAM filters to match configuration change.
1487 */
1488static void rhine_update_vcam(struct net_device *dev)
1489{
1490 struct rhine_private *rp = netdev_priv(dev);
1491 void __iomem *ioaddr = rp->base;
1492 u16 vid;
1493 u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
1494 unsigned int i = 0;
1495
1496 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1497 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1498 vCAMmask |= 1 << i;
1499 if (++i >= VCAM_SIZE)
1500 break;
1501 }
1502 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1503}
1504
80d5c368 1505static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1506{
1507 struct rhine_private *rp = netdev_priv(dev);
1508
7ab87ff4 1509 spin_lock_bh(&rp->lock);
1510 set_bit(vid, rp->active_vlans);
1511 rhine_update_vcam(dev);
7ab87ff4 1512 spin_unlock_bh(&rp->lock);
8e586137 1513 return 0;
1514}
1515
80d5c368 1516static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1517{
1518 struct rhine_private *rp = netdev_priv(dev);
1519
7ab87ff4 1520 spin_lock_bh(&rp->lock);
1521 clear_bit(vid, rp->active_vlans);
1522 rhine_update_vcam(dev);
7ab87ff4 1523 spin_unlock_bh(&rp->lock);
8e586137 1524 return 0;
1525}
1526
1527static void init_registers(struct net_device *dev)
1528{
1529 struct rhine_private *rp = netdev_priv(dev);
1530 void __iomem *ioaddr = rp->base;
1531 int i;
1532
1533 for (i = 0; i < 6; i++)
1534 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1535
1536 /* Initialize other registers. */
1537 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1538 /* Configure initial FIFO thresholds. */
1539 iowrite8(0x20, ioaddr + TxConfig);
1540 rp->tx_thresh = 0x20;
1541 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1542
1543 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1544 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1545
1546 rhine_set_rx_mode(dev);
1547
ca8b6e04 1548 if (rp->quirks & rqMgmt)
1549 rhine_init_cam_filter(dev);
1550
bea3348e 1551 napi_enable(&rp->napi);
ab197668 1552
7ab87ff4 1553 iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1554
1555 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1556 ioaddr + ChipCmd);
1557 rhine_check_media(dev, 1);
1558}
1559
1560/* Enable MII link status auto-polling (required for IntrLinkChange) */
a384a33b 1561static void rhine_enable_linkmon(struct rhine_private *rp)
1da177e4 1562{
1563 void __iomem *ioaddr = rp->base;
1564
1565 iowrite8(0, ioaddr + MIICmd);
1566 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1567 iowrite8(0x80, ioaddr + MIICmd);
1568
a384a33b 1569 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1570
1571 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1572}
1573
1574/* Disable MII link status auto-polling (required for MDIO access) */
a384a33b 1575static void rhine_disable_linkmon(struct rhine_private *rp)
1da177e4 1576{
1577 void __iomem *ioaddr = rp->base;
1578
1579 iowrite8(0, ioaddr + MIICmd);
1580
a384a33b 1581 if (rp->quirks & rqRhineI) {
1582 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1583
1584 /* Can be called from ISR. Evil. */
1585 mdelay(1);
1586
1587 /* 0x80 must be set immediately before turning it off */
1588 iowrite8(0x80, ioaddr + MIICmd);
1589
a384a33b 1590 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1591
1592 /* Heh. Now clear 0x80 again. */
1593 iowrite8(0, ioaddr + MIICmd);
1594 }
1595 else
a384a33b 1596 rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1597}
1598
1599/* Read and write over the MII Management Data I/O (MDIO) interface. */
1600
1601static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1602{
1603 struct rhine_private *rp = netdev_priv(dev);
1604 void __iomem *ioaddr = rp->base;
1605 int result;
1606
a384a33b 1607 rhine_disable_linkmon(rp);
1608
1609 /* rhine_disable_linkmon already cleared MIICmd */
1610 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1611 iowrite8(regnum, ioaddr + MIIRegAddr);
1612 iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
a384a33b 1613 rhine_wait_bit_low(rp, MIICmd, 0x40);
1614 result = ioread16(ioaddr + MIIData);
1615
a384a33b 1616 rhine_enable_linkmon(rp);
1617 return result;
1618}
1619
1620static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1621{
1622 struct rhine_private *rp = netdev_priv(dev);
1623 void __iomem *ioaddr = rp->base;
1624
a384a33b 1625 rhine_disable_linkmon(rp);
1626
1627 /* rhine_disable_linkmon already cleared MIICmd */
1628 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1629 iowrite8(regnum, ioaddr + MIIRegAddr);
1630 iowrite16(value, ioaddr + MIIData);
1631 iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
a384a33b 1632 rhine_wait_bit_low(rp, MIICmd, 0x20);
1da177e4 1633
a384a33b 1634 rhine_enable_linkmon(rp);
1635}
1636
1637static void rhine_task_disable(struct rhine_private *rp)
1638{
1639 mutex_lock(&rp->task_lock);
1640 rp->task_enable = false;
1641 mutex_unlock(&rp->task_lock);
1642
1643 cancel_work_sync(&rp->slow_event_task);
1644 cancel_work_sync(&rp->reset_task);
1645}
1646
1647static void rhine_task_enable(struct rhine_private *rp)
1648{
1649 mutex_lock(&rp->task_lock);
1650 rp->task_enable = true;
1651 mutex_unlock(&rp->task_lock);
1652}
1653
1654static int rhine_open(struct net_device *dev)
1655{
1656 struct rhine_private *rp = netdev_priv(dev);
1657 void __iomem *ioaddr = rp->base;
1658 int rc;
1659
f7630d18 1660 rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1661 if (rc)
1662 return rc;
1663
f7630d18 1664 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1665
1666 rc = alloc_ring(dev);
1667 if (rc) {
f7630d18 1668 free_irq(rp->irq, dev);
1669 return rc;
1670 }
1671 alloc_rbufs(dev);
1672 alloc_tbufs(dev);
1673 rhine_chip_reset(dev);
7ab87ff4 1674 rhine_task_enable(rp);
1da177e4 1675 init_registers(dev);
1676
1677 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1678 __func__, ioread16(ioaddr + ChipCmd),
1679 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1680
1681 netif_start_queue(dev);
1682
1683 return 0;
1684}
1685
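/* Reset worker scheduled from rhine_tx_timeout(): rebuilds both descriptor
 * rings, resets the chip and restarts the transmit queue. */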
c0d7a021 1686static void rhine_reset_task(struct work_struct *work)
1da177e4 1687{
1688 struct rhine_private *rp = container_of(work, struct rhine_private,
1689 reset_task);
1690 struct net_device *dev = rp->dev;
1da177e4 1691
7ab87ff4 1692 mutex_lock(&rp->task_lock);
1da177e4 1693
1694 if (!rp->task_enable)
1695 goto out_unlock;
bea3348e 1696
7ab87ff4 1697 napi_disable(&rp->napi);
a926592f 1698 netif_tx_disable(dev);
c0d7a021 1699 spin_lock_bh(&rp->lock);
1700
1701 /* clear all descriptors */
1702 free_tbufs(dev);
1703 free_rbufs(dev);
1704 alloc_tbufs(dev);
1705 alloc_rbufs(dev);
1706
1707 /* Reinitialize the hardware. */
1708 rhine_chip_reset(dev);
1709 init_registers(dev);
1710
c0d7a021 1711 spin_unlock_bh(&rp->lock);
1da177e4 1712
1ae5dc34 1713 dev->trans_start = jiffies; /* prevent tx timeout */
553e2335 1714 dev->stats.tx_errors++;
1da177e4 1715 netif_wake_queue(dev);
7ab87ff4
FR
1716
1717out_unlock:
1718 mutex_unlock(&rp->task_lock);
1da177e4
LT
1719}
1720
static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
		    ioread16(ioaddr + IntrStatus),
		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	schedule_work(&rp->reset_task);
}

static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	rp->tx_skbuff[entry] = skb;

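	/*
	 * Rhine-I needs the pre-allocated, longword-aligned bounce buffer
	 * whenever the skb is misaligned, fragmented, or still needs its
	 * checksum computed; in that case the frame is copied (and re-padded)
	 * into tx_buf[entry].
	 */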
	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb_any(skb);
			rp->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			dma_map_single(hwdev, skb->data, skb->len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
			dev_kfree_skb_any(skb);
			rp->tx_skbuff_dma[entry] = 0;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	if (unlikely(skb_vlan_tag_present(skb))) {
		u16 vid_pcp = skb_vlan_tag_get(skb);

		/* drop CFI/DEI bit, register needs VID and PCP */
		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
		/* request tagging */
		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	} else {
		rp->tx_ring[entry].tx_status = 0;
	}

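	/*
	 * Account the queued bytes to BQL, then hand the descriptor to the
	 * NIC: the barriers below ensure all descriptor fields are visible
	 * before DescOwn transfers ownership.
	 */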
	netdev_sent_queue(dev, skb->len);
	/* lock eth irq */
	wmb();
	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	if (skb_vlan_tag_present(skb))
		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
		  rp->cur_tx - 1, entry);

	return NETDEV_TX_OK;
}

static void rhine_irq_disable(struct rhine_private *rp)
{
	iowrite16(0x0000, rp->base + IntrEnable);
	mmiowb();
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	u32 status;
	int handled = 0;

	status = rhine_get_events(rp);

	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);

	if (status & RHINE_EVENT) {
		handled = 1;

		rhine_irq_disable(rp);
		napi_schedule(&rp->napi);
	}

	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
			  status);
	}

	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct sk_buff *skb;

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
			  entry, txstatus);
		if (txstatus & DescOwn)
			break;
		skb = rp->tx_skbuff[entry];
		if (txstatus & 0x8000) {
			netif_dbg(rp, tx_done, dev,
				  "Transmit error, Tx status %08x\n", txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
				  (txstatus >> 3) & 0xF, txstatus & 0xF);

			u64_stats_update_begin(&rp->tx_stats.syncp);
			rp->tx_stats.bytes += skb->len;
			rp->tx_stats.packets++;
			u64_stats_update_end(&rp->tx_stats.syncp);
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			dma_unmap_single(hwdev,
					 rp->tx_skbuff_dma[entry],
					 skb->len,
					 DMA_TO_DEVICE);
		}
		bytes_compl += skb->len;
		pkts_compl++;
		dev_consume_skb_any(skb);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);
}

/**
 * rhine_get_vlan_tci - extract TCI from Rx data buffer
 * @skb: pointer to sk_buff
 * @data_size: used data area of the buffer including CRC
 *
 * If hardware VLAN tag extraction is enabled and the chip indicates an 802.1Q
 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
 * aligned following the CRC.
 */
static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
{
	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
	return be16_to_cpup((__be16 *)trailer);
}

/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		u32 desc_length = le32_to_cpu(desc->desc_length);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
			  desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, "
					    "entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
				netdev_warn(dev,
					    "Oversized Ethernet frame %p vs %p\n",
					    rp->rx_head_desc,
					    &rp->rx_ring[entry]);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				netif_dbg(rp, rx_err, dev,
					  "%s() Rx error %08x\n", __func__,
					  desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb = NULL;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;
			u16 vlan_tci = 0;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak)
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			if (skb) {
				dma_sync_single_for_cpu(hwdev,
							rp->rx_skbuff_dma[entry],
							rp->rx_buf_sz,
							DMA_FROM_DEVICE);

				skb_copy_to_linear_data(skb,
							rp->rx_skbuff[entry]->data,
							pkt_len);
				skb_put(skb, pkt_len);
				dma_sync_single_for_device(hwdev,
							   rp->rx_skbuff_dma[entry],
							   rp->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				dma_unmap_single(hwdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 DMA_FROM_DEVICE);
			}

			if (unlikely(desc_length & DescTag))
				vlan_tci = rhine_get_vlan_tci(skb, data_size);

			skb->protocol = eth_type_trans(skb, dev);

			if (unlikely(desc_length & DescTag))
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
			netif_receive_skb(skb);

			u64_stats_update_begin(&rp->rx_stats.syncp);
			rp->rx_stats.bytes += pkt_len;
			rp->rx_stats.packets++;
			u64_stats_update_end(&rp->rx_stats.syncp);
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
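	/*
	 * Slots are handed back to the chip by setting DescOwn; if skb
	 * allocation or DMA mapping fails, the slot stays empty and is
	 * retried the next time around.
	 */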
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			rp->rx_skbuff_dma[entry] =
				dma_map_single(hwdev, skb->data,
					       rp->rx_buf_sz,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(hwdev,
					      rp->rx_skbuff_dma[entry])) {
				dev_kfree_skb(skb);
				rp->rx_skbuff_dma[entry] = 0;
				break;
			}
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}

static void rhine_restart_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = rhine_get_events(rp);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);

		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	} else {
		/* This should never happen */
		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
			   intr_status);
	}
}

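/*
 * Rare "slow" events (link changes, PCI errors) are handled here in process
 * context under task_lock; once the events have been acknowledged, the
 * interrupt sources are re-enabled.
 */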
static void rhine_slow_event_task(struct work_struct *work)
{
	struct rhine_private *rp =
		container_of(work, struct rhine_private, slow_event_task);
	struct net_device *dev = rp->dev;
	u32 intr_status;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	intr_status = rhine_get_events(rp);
	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);

	if (intr_status & IntrPCIErr)
		netif_warn(rp, hw, dev, "PCI error\n");

	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

out_unlock:
	mutex_unlock(&rp->task_lock);
}

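/*
 * The u64_stats begin/retry loops return a consistent snapshot of the Rx/Tx
 * byte and packet counters even on 32-bit hosts, where the 64-bit updates
 * are not atomic.
 */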
static struct rtnl_link_stats64 *
rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rhine_private *rp = netdev_priv(dev);
	unsigned int start;

	spin_lock_bh(&rp->lock);
	rhine_update_rx_crc_and_missed_errord(rp);
	spin_unlock_bh(&rp->lock);

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
		stats->rx_packets = rp->rx_stats.packets;
		stats->rx_bytes = rp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
		stats->tx_packets = rp->tx_stats.packets;
		stats->tx_bytes = rp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));

	return stats;
}

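/*
 * Rx filtering: promiscuous mode and hash overflow fall back to accepting
 * all multicasts; management chips (rqMgmt) use per-address CAM entries,
 * all others use the 64-bit multicast hash filter.
 */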
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->quirks & rqMgmt) {
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->quirks & rqMgmt) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct device *hwdev = dev->dev.parent;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return rp->msg_enable;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct rhine_private *rp = netdev_priv(dev);

	rp->msg_enable = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	mutex_lock(&rp->task_lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	free_irq(rp->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}


static void rhine_remove_one_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
}

static int rhine_remove_one_platform(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	iounmap(rp->base);

	free_netdev(dev);

	return 0;
}

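/*
 * At system shutdown the requested wake-up sources are programmed into the
 * WOL registers; unless avoid_D3 is set, the chip is then allowed to enter
 * D3 when the machine powers off.
 */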
static void rhine_shutdown_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM_SLEEP
static int rhine_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rhine_task_disable(rp);
	rhine_irq_disable(rp);
	napi_disable(&rp->napi);

	netif_device_detach(dev);

	if (dev_is_pci(device))
		rhine_shutdown_pci(to_pci_dev(device));

	return 0;
}

static int rhine_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	enable_mmio(rp->pioaddr, rp->quirks);
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	rhine_task_enable(rp);
	spin_lock_bh(&rp->lock);
	init_registers(dev);
	spin_unlock_bh(&rp->lock);

	netif_device_attach(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#define RHINE_PM_OPS	(&rhine_pm_ops)

#else

#define RHINE_PM_OPS	NULL

#endif /* !CONFIG_PM_SLEEP */

static struct pci_driver rhine_driver_pci = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one_pci,
	.remove		= rhine_remove_one_pci,
	.shutdown	= rhine_shutdown_pci,
	.driver.pm	= RHINE_PM_OPS,
};

static struct platform_driver rhine_driver_platform = {
	.probe		= rhine_init_one_platform,
	.remove		= rhine_remove_one_platform,
	.driver = {
		.name		= DRV_NAME,
		.of_match_table	= rhine_of_tbl,
		.pm		= RHINE_PM_OPS,
	}
};

static struct dmi_system_id rhine_dmi_table[] __initdata = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};

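/*
 * Both bus front ends are registered at module init: initialization only
 * fails if neither the PCI driver nor the platform (OF) driver could be
 * registered.
 */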
static int __init rhine_init(void)
{
	int ret_pci, ret_platform;

/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	if (dmi_check_system(rhine_dmi_table)) {
		/* these BIOSes fail at PXE boot if chip is in D3 */
		avoid_D3 = true;
		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
	} else if (avoid_D3)
		pr_info("avoid_D3 set\n");

	ret_pci = pci_register_driver(&rhine_driver_pci);
	ret_platform = platform_driver_register(&rhine_driver_platform);
	if ((ret_pci < 0) && (ret_platform < 0))
		return ret_pci;

	return 0;
}


static void __exit rhine_cleanup(void)
{
	platform_driver_unregister(&rhine_driver_platform);
	pci_unregister_driver(&rhine_driver_pci);
}


module_init(rhine_init);
module_exit(rhine_cleanup);