/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/*
 *	e100.c: Intel(R) PRO/100 ethernet driver
 *
 *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
 *	original e100 driver, but better described as a munging of
 *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 *	References:
 *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
 *		Open Source Software Developers Manual,
 *		http://sourceforge.net/projects/e1000
 *
 *
 *	                     Theory of Operation
 *
 *	I.   General
 *
 *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 *	controller family, which includes the 82557, 82558, 82559, 82550,
 *	82551, and 82562 devices.  82558 and greater controllers
 *	integrate the Intel 82555 PHY.  The controllers are used in
 *	server and client network interface cards, as well as in
 *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 *	configurations.  8255x supports a 32-bit linear addressing
 *	mode and operates at a 33MHz PCI clock rate.
 *
 *	II.  Driver Operation
 *
 *	Memory-mapped mode is used exclusively to access the device's
 *	shared-memory structure, the Control/Status Registers (CSR).  All
 *	setup, configuration, and control of the device, including queuing
 *	of Tx, Rx, and configuration commands, is through the CSR.
 *	cmd_lock serializes accesses to the CSR command register.
 *	cb_lock protects the shared Command Block List (CBL).
 *
 *	8255x is highly MII-compliant and all access to the PHY goes
 *	through the Management Data Interface (MDI).  Consequently, the
 *	driver leverages the mii.c library shared with other MII-compliant
 *	devices.
 *
 *	Big- and Little-Endian byte order as well as 32- and 64-bit
 *	archs are supported.  Weak-ordered memory and non-cache-coherent
 *	archs are supported.
 *
 *	III. Transmit
 *
 *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 *	together in a fixed-size ring (CBL) thus forming the flexible mode
 *	memory structure.  A TCB marked with the suspend-bit indicates
 *	the end of the ring.  The last TCB processed suspends the
 *	controller, and the controller can be restarted by issuing a CU
 *	resume command to continue from the suspend point, or a CU start
 *	command to start at a given position in the ring.
 *
 *	Non-Tx commands (config, multicast setup, etc) are linked
 *	into the CBL ring along with Tx commands.  The common structure
 *	used for both Tx and non-Tx commands is the Command Block (CB).
 *
 *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
 *	is the next CB to check for completion; cb_to_send is the first
 *	CB to start on in case of a previous failure to resume.  CB clean
 *	up happens in interrupt context in response to a CU interrupt.
 *	cbs_avail keeps track of the number of free CB resources available.
 *
 *	Hardware padding of short packets to the minimum packet size is
 *	enabled.  82557 pads with 7Eh, while the later controllers pad
 *	with 00h.
 *
 *	IV.  Receive
 *
 *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
 *	Descriptors (RFD) + data buffer, thus forming the simplified mode
 *	memory structure.  Rx skbs are allocated to contain both the RFD
 *	and the data buffer, but the RFD is pulled off before the skb is
 *	indicated.  The data buffer is aligned such that encapsulated
 *	protocol headers are u32-aligned.  Since the RFD is part of the
 *	mapped shared memory, and completion status is contained within
 *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
 *	view from software and hardware.
 *
 *	In order to keep updates to the RFD link field from colliding with
 *	hardware writes to mark packets complete, we use the feature that
 *	hardware will not write to a size 0 descriptor and mark the previous
 *	packet as end-of-list (EL).  After updating the link, we remove EL
 *	and only then restore the size such that hardware may use the
 *	previous-to-end RFD (a sketch of this sequence follows this comment).
 *
 *	Under typical operation, the receive unit (RU) is started once,
 *	and the controller happily fills RFDs as frames arrive.  If
 *	replacement RFDs cannot be allocated, or the RU goes non-active,
 *	the RU must be restarted.  Frame arrival generates an interrupt,
 *	and Rx indication and re-allocation happen in the same context,
 *	therefore no locking is required.  A software-generated interrupt
 *	is generated from the watchdog to recover from a failed allocation
 *	scenario where all Rx resources have been indicated and none
 *	replaced.
 *
 *	V.   Miscellaneous
 *
 *	VLAN offloading of tagging, stripping and filtering is not
 *	supported, but the driver will accommodate the extra 4-byte VLAN tag
 *	for processing by upper layers.  Tx/Rx Checksum offloading is not
 *	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
 *	not supported (hardware limitation).
 *
 *	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 *	Thanks to JC (jchapman@katalix.com) for helping with
 *	testing/troubleshooting the development driver.
 *
 *	TODO:
 *	o several entry points race with dev->close
 *	o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 *	FIXES:
 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 *	- Stratus87247: protect MDI control register manipulations
 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
 *	- add clean lowlevel I/O emulation for cards with MII-lacking PHYs
 */
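/* Editor's note (illustrative sketch, not part of the original driver):
 * the RFD link-update sequence described in section IV above, in pseudo-C.
 * The names prev_rfd/end_rfd/new_dma/RFD_BUF_LEN are hypothetical; the
 * in-tree logic lives in the Rx allocation/indication paths further down
 * this file.
 *
 *	end_rfd->link = cpu_to_le32(new_dma);     // 1. chain in the new RFD
 *	prev_rfd->command &= ~cpu_to_le16(cb_el); // 2. then remove EL
 *	end_rfd->size = cpu_to_le16(RFD_BUF_LEN); // 3. only now restore size
 *						  //    so hw may use this RFD
 */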

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
#include <asm/unaligned.h>


#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
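/* Editor's note (illustrative): all three parameters are read-only module
 * options (perm 0), so they are set at load time, e.g.
 *	modprobe e100 debug=16 use_io=1 eeprom_bad_csum_allow=1
 * eeprom_bad_csum_allow=1 lets a NIC with a corrupt EEPROM checksum probe
 * anyway (see e100_eeprom_load() below).
 */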

#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
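/* Editor's note (illustrative): each table entry below expands to a
 * struct pci_device_id initializer, e.g. INTEL_8255X_ETHERNET_DEVICE(0x1229, 0):
 *	{ PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID,
 *	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, 0 }
 * The final field lands in driver_data and carries the ICH revision flag
 * consumed by the probe path.
 */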
static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_no_res = 0x08,
	rus_ready  = 0x10,
	rus_mask   = 0x3C,
};

enum ru_state {
	RU_SUSPENDED     = 0,
	RU_RUNNING       = 1,
	RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

/**
 * cb_command - Command Block flags
 * @cb_tx_nc: 0: controller does CRC (normal),  1: CRC from skb memory
 */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_tx_nc  = 0x0010,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};

struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
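/* Editor's note (illustrative): X() reverses the declaration order of two
 * bitfields on big-endian-bitfield machines, so each byte of struct config
 * below has the same physical bit layout on either endianness.  E.g.
 *	u8 X(byte_count:6, pad0:2);
 * keeps byte_count in the 6 low-order bits whether the compiler assigns
 * bitfields starting from the LSB (little-endian) or the MSB (big-endian).
 */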
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;
	__le16 eeprom[256];
	spinlock_t mdio_lock;
	const struct firmware *fw;
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for (j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for (i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
};

/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for (i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if (!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
};

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
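/* Editor's note (worked example): if the first eeprom_wc - 1 words sum to
 * S, the device stores 0xBABA - S in the last word, so the 16-bit sum over
 * all eeprom_wc words comes out to 0xBABA; e.g. S = 0x1234 gives a stored
 * checksum word of 0xA886.
 */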

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

#define E100_WAIT_SCB_TIMEOUT	20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST	20    /* delay like the old code */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}

static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
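/* Editor's note (illustrative): e100_exec_cb() is the generic path for
 * queuing any command block; callers pass a prepare callback that fills
 * in the CB, e.g. e100_exec_cb(nic, NULL, e100_configure) or
 * e100_exec_cb(nic, NULL, e100_multi) as used later in this file.
 */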

static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	struct nic *nic = netdev_priv(netdev);

	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
}

/* the standard mdio_ctrl() function for usual MII-compliant hardware */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;


	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}
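/* Editor's note (illustrative): the word written to mdi_ctrl above packs
 * the transaction as data in bits 15:0, the register number in bits 20:16,
 * and the PHY address starting at bit 21, with the direction opcode taken
 * from enum mdi_ctrl (mdi_read 0x08000000 / mdi_write 0x04000000); bit 28
 * (mdi_ready) is set by the hardware when the transaction completes.
 */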

/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
				 u32 addr,
				 u32 dir,
				 u32 reg,
				 u16 data)
{
	if ((reg == MII_BMCR) && (dir == mdi_write)) {
		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
							MII_ADVERTISE);

			/*
			 * Workaround Si issue where sometimes the part will not
			 * autoneg to 100Mbps even when advertised.
			 */
			if (advert & ADVERTISE_100FULL)
				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
			else if (advert & ADVERTISE_100HALF)
				data |= BMCR_SPEED100;
		}
	}
	return mdio_ctrl_hw(nic, addr, dir, reg, data);
}

/* Fully software-emulated mdio_ctrl() function for cards without
 * MII-compliant PHYs.
 * For now, this is mainly geared towards 80c24 support; in case of further
 * requirements for other types (i82503, ...?) either extend this mechanism
 * or split it, whichever is cleaner.
 */
static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
				      u32 addr,
				      u32 dir,
				      u32 reg,
				      u16 data)
{
	/* might need to allocate a netdev_priv'ed register array eventually
	 * to be able to record state changes, but for now
	 * some fully hardcoded register handling ought to be ok I guess. */

	if (dir == mdi_read) {
		switch (reg) {
		case MII_BMCR:
			/* Auto-negotiation, right? */
			return  BMCR_ANENABLE |
				BMCR_FULLDPLX;
		case MII_BMSR:
			return	BMSR_LSTATUS /* for mii_link_ok() */ |
				BMSR_ANEGCAPABLE |
				BMSR_10FULL;
		case MII_ADVERTISE:
			/* 80c24 is a "combo card" PHY, right? */
			return	ADVERTISE_10HALF |
				ADVERTISE_10FULL;
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	} else {
		switch (reg) {
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	}
}

static inline int e100_phy_supports_mii(struct nic *nic)
{
	/* for now, just check it by comparing whether we
	 * are using MII software emulation.
	 */
	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
}

static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1;		/* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */
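/* Editor's note (illustrative): per the comment above, a build that keeps
 * the ucode workarounds but disables CPUSaver interrupt bundling would
 * instead use:
 *	#define BUNDLESMALL 0
 *	#define BUNDLEMAX (u16)1
 *	#define INTDELAY (u16)1
 */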

/* Initialize firmware */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;
	bool required = false;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision
	 *
	 * Based on comments in the source code for the FreeBSD fxp
	 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
	 *
	 *    "fixes for bugs in the B-step hardware (specifically, bugs
	 *     with Inline Receive)."
	 *
	 * So we must fail if it cannot be loaded.
	 *
	 * The other microcode files are only required for the optional
	 * CPUSaver feature.  Nice to have, but no reason to fail.
	 */
	if (nic->mac == mac_82559_D101M) {
		fw_name = FIRMWARE_D101M;
	} else if (nic->mac == mac_82559_D101S) {
		fw_name = FIRMWARE_D101S;
	} else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
		fw_name = FIRMWARE_D102E;
		required = true;
	} else { /* No ucode on other devices */
		return NULL;
	}

	/* If the firmware has not previously been loaded, request a pointer
	 * to it.  If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		if (required) {
			netif_err(nic, probe, nic->netdev,
				  "Failed to load firmware \"%s\": %d\n",
				  fw_name, err);
			return ERR_PTR(err);
		} else {
			netif_info(nic, probe, nic->netdev,
				   "CPUSaver disabled. Needs \"%s\": %d\n",
				   fw_name, err);
			return NULL;
		}
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use.  Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}

static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
}

static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}

static int e100_phy_check_without_mii(struct nic *nic)
{
	u8 phy_type;
	int without_mii;

	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;

	switch (phy_type) {
	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
	case I82503: /* Non-MII PHY; UNTESTED! */
	case S80C24: /* Non-MII PHY; tested and working */
		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
		 * doesn't have a programming interface of any sort.  The
		 * media is sensed automatically based on how the link partner
		 * is configured.  This is, in essence, manual configuration.
		 */
		netif_info(nic, probe, nic->netdev,
			   "found MII-less i82503 or 80c24 or other PHY\n");

		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */

		/* these might be needed for certain MII-less cards...
		 * nic->flags |= ich;
		 * nic->flags |= ich_10h_workaround; */

		without_mii = 1;
		break;
	default:
		without_mii = 0;
		break;
	}
	return without_mii;
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}

static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct netdev_hw_addr *ha;
	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == count)
			break;
		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
			ETH_ALEN);
	}
}

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "mc_count=%d, flags=0x%04X\n",
		     netdev_mc_count(netdev), netdev->flags);

	if (netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if (netdev->flags & IFF_ALLMULTI ||
		netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}
1623
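/*
 * Harvest the statistics the controller DMAs into nic->mem->stats.
 * The dump buffer grew across controller generations, so the
 * dump-complete marker sits at a generation-dependent offset; the
 * "complete" pointer below selects the right one for this MAC.
 */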
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		nic->rx_short_frame_errors +=
			le32_to_cpu(s->rx_short_frame_errors);
		ns->rx_length_errors = nic->rx_short_frame_errors +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}

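/*
 * Adaptive inter-frame spacing: when collisions dominate on a busy
 * half-duplex link, widen the IFS in steps of 5 (up to 60) so the part
 * competes less aggressively for the wire, and narrow it again once
 * traffic falls below min_frames per watchdog interval.
 */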
static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
	/* Adjust inter-frame-spacing (IFS) between two transmits if
	 * we're getting collisions on a half-duplex connection. */

	if (duplex == DUPLEX_HALF) {
		u32 prev = nic->adaptive_ifs;
		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
		    (nic->tx_frames > min_frames)) {
			if (nic->adaptive_ifs < 60)
				nic->adaptive_ifs += 5;
		} else if (nic->tx_frames < min_frames) {
			if (nic->adaptive_ifs >= 5)
				nic->adaptive_ifs -= 5;
		}
		if (nic->adaptive_ifs != prev)
			e100_exec_cb(nic, NULL, e100_configure);
	}
}

static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	u32 speed;

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,
		 &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}

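/*
 * Fill in a transmit command block for one skb.  A completion
 * interrupt (cb_i) is requested only on every 16th frame to batch Tx
 * clean-up; cb_tx_nc asks the controller not to append a CRC so test
 * tools can send deliberately bad frames.
 */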
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;

	/*
	 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
	 * testing, i.e. sending frames with a bad CRC.
	 */
	if (unlikely(skb->no_fcs))
		cb->command |= __constant_cpu_to_le16(cb_tx_nc);
	else
		cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);

	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
	skb_tx_timestamp(skb);
}

static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if (nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if (e100_exec_cmd(nic, cuc_nop, 0))
			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
				     "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch (err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

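/*
 * Reclaim command blocks the hardware has marked complete, unmapping
 * and freeing their skbs.  Runs under cb_lock (normally from the NAPI
 * poll routine) so it cannot race with e100_exec_cb() producers.
 */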
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		rmb(); /* read skb after status */
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void *)cb - (void *)nic->cbs) /
				   sizeof(struct cb)),
			     cb->status);

		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}

static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}

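/*
 * Allocate the CB ring from the PCI pool and link it circularly:
 * cb->link carries the bus address of the next entry for the
 * controller, while next/prev give the driver the same ring in
 * virtual addresses.
 */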
static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
				  &nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;
	memset(nic->cbs, 0, count * sizeof(struct cb));

	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}

static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
	if (!nic->rxs)
		return;
	if (RU_SUSPENDED != nic->ru_running)
		return;

	/* handle init time starts */
	if (!rx)
		rx = nic->rxs;

	/* (Re)start RU if suspended or idle and RFA is non-NULL */
	if (rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}

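/*
 * Each receive buffer is one skb sized for the hardware RFD header
 * plus a maximum-size frame (RFD_BUF_LEN below).  A blank RFD template
 * is copied to the head of the buffer, so controller and stack share a
 * single DMA mapping per packet.
 */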
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}

static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;
	u16 fcs_pad = 0;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
		     "status=0x%04X\n", rfd_status);
	rmb(); /* read size after status bit */

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))
			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size */
	if (unlikely(dev->features & NETIF_F_RXFCS))
		fcs_pad = 4;
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {
		if (ioread8(&nic->csr->scb.status) & rus_no_res)
			nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	/* If we are receiving all frames, then don't bother
	 * checking for errors.
	 */
	if (unlikely(dev->features & NETIF_F_RXALL)) {
		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
			/* Received oversized frame, but keep it. */
			nic->rx_over_length_errors++;
		goto process_skb;
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
process_skb:
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (actual_size - fcs_pad);
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}

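/*
 * Indicate received frames up the stack, refill the RFA with fresh
 * skbs, and advance the el-bit "stopping point" kept two buffers
 * behind the newest RFD so the hardware never chases a link pointer
 * the driver is still rewriting.
 */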
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On EAGAIN we hit quota, so there is more work to do; restart once
	 * cleanup is complete.
	 * Otherwise, if we are already in the RNR state, take note: this
	 * ensures the state machine never restarts with a partially cleaned
	 * list, avoiding a race between hardware and rx_to_clean when in
	 * NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		/* ack the RNR interrupt and restart the receiver */
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}

static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if (nic->rxs) {
		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if (rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}

static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer without
	 * worrying about hardware touching it.
	 * We set the size to 0 to prevent hardware from touching this buffer.
	 * When the hardware hits the before last buffer with el-bit and size
	 * of 0, it will RNR interrupt, the RU will go into the No Resources
	 * state.  It will not complete nor write to this buffer. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}

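/*
 * Interrupt handler.  A single read of stat_ack tells us whether the
 * interrupt is ours and which events need acknowledging; the actual
 * Rx/Tx work is deferred to NAPI with the device IRQ masked.
 */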
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	    stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if (likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}

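/*
 * NAPI poll: clean received frames up to the budget and reclaim Tx
 * resources.  Interrupts are re-enabled only when a poll consumes less
 * than its budget and polling mode is exited.
 */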
static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		e100_enable_irq(nic);
	}

	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif

static int e100_set_mac_address(struct net_device *netdev, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);

	return 0;
}

static int e100_change_mtu(struct net_device *netdev, int new_mtu)
{
	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	netdev->mtu = new_mtu;
	return 0;
}

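/*
 * ASF (Alert Standard Format) remote management may be enabled from
 * the EEPROM on the device IDs tested below; __e100_shutdown() uses
 * this to decide whether wake events must stay armed when the system
 * sleeps or powers off.
 */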
static int e100_asf(struct nic *nic)
{
	/* ASF can be enabled from eeprom */
	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
}

static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}

static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}

static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}

static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));

	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}

static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}

#define MII_LED_CONTROL		0x1B
#define E100_82552_LED_OVERRIDE	0x19
#define E100_82552_LED_ON	0x000F /* LED_TX and LED_RX both on */
#define E100_82552_LED_OFF	0x000A /* LED_TX and LED_RX both off */

static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}

static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}

static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev),
		sizeof(info->bus_info));
}

#define E100_PHY_REGS 0x1C
static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
}

static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}

static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);
	wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
}

static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
		return -EOPNOTSUPP;

	if (wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);

	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}

static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}

static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}

static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}

static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}

static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}

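/*
 * The EEPROM image is cached in nic->eeprom as 16-bit words, while
 * ethtool offsets and lengths are in bytes, hence the eeprom_wc << 1
 * byte count above and the >> 1 word conversions when saving below.
 */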
#define E100_EEPROM_MAGIC 0x1234
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}

static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}

static void e100_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	ring->rx_max_pending = rfds->max;
	ring->tx_max_pending = cbs->max;
	ring->rx_pending = rfds->count;
	ring->tx_pending = cbs->count;
}

static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}

static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)",
	"Eeprom test (on/offline)",
	"Self test (offline)",
	"Mac loopback (offline)",
	"Phy loopback (offline)",
};
#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)

static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	msleep_interruptible(4 * 1000);
}

static int e100_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct nic *nic = netdev_priv(netdev);
	enum led_state {
		led_on = 0x01,
		led_off = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		      MII_LED_CONTROL;
	u16 leds = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;

	case ETHTOOL_ID_ON:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
		break;

	case ETHTOOL_ID_OFF:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
		break;

	case ETHTOOL_ID_INACTIVE:
		break;
	}

	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
	return 0;
}

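/*
 * The first E100_NET_STATS_LEN names mirror the leading fields of
 * struct net_device_stats in declaration order, which lets
 * e100_get_ethtool_stats() copy them out with a simple indexed loop;
 * the rest are driver-private counters appended afterwards.
 */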
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
#define E100_NET_STATS_LEN 21
#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)

static int e100_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return E100_TEST_LEN;
	case ETH_SS_STATS:
		return E100_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}

static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}

static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings = e100_get_settings,
	.set_settings = e100_set_settings,
	.get_drvinfo = e100_get_drvinfo,
	.get_regs_len = e100_get_regs_len,
	.get_regs = e100_get_regs,
	.get_wol = e100_get_wol,
	.set_wol = e100_set_wol,
	.get_msglevel = e100_get_msglevel,
	.set_msglevel = e100_set_msglevel,
	.nway_reset = e100_nway_reset,
	.get_link = e100_get_link,
	.get_eeprom_len = e100_get_eeprom_len,
	.get_eeprom = e100_get_eeprom,
	.set_eeprom = e100_set_eeprom,
	.get_ringparam = e100_get_ringparam,
	.set_ringparam = e100_set_ringparam,
	.self_test = e100_diag_test,
	.get_strings = e100_get_strings,
	.set_phys_id = e100_set_phys_id,
	.get_ethtool_stats = e100_get_ethtool_stats,
	.get_sset_count = e100_get_sset_count,
	.get_ts_info = ethtool_op_get_ts_info,
};

static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}

static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
		&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}

static void e100_free(struct nic *nic)
{
	if (nic->mem) {
		pci_free_consistent(nic->pdev, sizeof(struct mem),
			nic->mem, nic->dma_addr);
		nic->mem = NULL;
	}
}

static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	netif_carrier_off(netdev);
	if ((err = e100_up(nic)))
		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
	return err;
}

static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}

static int e100_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct nic *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	e100_exec_cb(nic, NULL, e100_configure);
	return 0;
}

static const struct net_device_ops e100_netdev_ops = {
	.ndo_open = e100_open,
	.ndo_stop = e100_close,
	.ndo_start_xmit = e100_xmit_frame,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = e100_set_multicast_list,
	.ndo_set_mac_address = e100_set_mac_address,
	.ndo_change_mtu = e100_change_mtu,
	.ndo_do_ioctl = e100_do_ioctl,
	.ndo_tx_timeout = e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = e100_netpoll,
#endif
	.ndo_set_features = e100_set_features,
};

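/*
 * PCI probe: allocate the net_device, map the CSR (BAR 1 when the
 * use_io module parameter selects i/o access, BAR 0 otherwise), load
 * the EEPROM and register the interface.  The hardware is reset before
 * pci_set_master() in case an interrupt is pending from a previous
 * life of the device.
 */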
static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;

	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->netdev_ops = &e100_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* D100 MAC doesn't allow rx of vlan packets with normal MTU */
	if (nic->mac < mac_82558_D101_A4)
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if ((nic->mac >= mac_82558_D101_A4) &&
	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
		goto err_out_free;
	}
	nic->cbs_pool = pci_pool_create(netdev->name,
			   nic->pdev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
	netif_info(nic, probe, nic->netdev,
		   "addr 0x%llx, irq %d, MAC addr %pM\n",
		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		   pdev->irq, netdev->dev_addr);

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}

static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

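/*
 * The 82552 PHY supports "reverse auto-negotiation": with the bits
 * below set it keeps negotiating the link while the host sleeps, so a
 * wake-capable link can stay up for WoL/ASF.  Suspend arms it and
 * resume clears it again.
 */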
#define E100_82552_SMARTSPEED	0x14	/* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG	0x0200	/* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW	0x0400	/* Auto-negotiate now */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
				E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
				E100_82552_SMARTSPEED, smartspeed |
				E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}

static int __e100_power_off(struct pci_dev *pdev, bool wake)
{
	if (wake)
		return pci_prepare_to_sleep(pdev);

	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

#ifdef CONFIG_PM
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	return __e100_power_off(pdev, wake);
}

static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
			E100_82552_SMARTSPEED,
			smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */

static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}

/* ------------------ PCI Error Recovery infrastructure -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}

static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

static struct pci_driver e100_driver = {
	.name = DRV_NAME,
	.id_table = e100_id_table,
	.probe = e100_probe,
	.remove = __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend = e100_suspend,
	.resume = e100_resume,
#endif
	.shutdown = e100_shutdown,
	.err_handler = &e100_err_handler,
};

static int __init e100_init_module(void)
{
	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
		pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
		pr_info("%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);