/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/*
 *	e100.c: Intel(R) PRO/100 ethernet driver
 *
 *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
 *	original e100 driver, but better described as a munging of
 *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 *	References:
 *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
 *		Open Source Software Developers Manual,
 *		http://sourceforge.net/projects/e1000
 *
 *
 *	                     Theory of Operation
 *
 *	I.   General
 *
 *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 *	controller family, which includes the 82557, 82558, 82559, 82550,
 *	82551, and 82562 devices.  82558 and greater controllers
 *	integrate the Intel 82555 PHY.  The controllers are used in
 *	server and client network interface cards, as well as in
 *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 *	configurations.  8255x supports a 32-bit linear addressing
 *	mode and operates at a 33 MHz PCI clock rate.
 *
 *	II.  Driver Operation
 *
 *	Memory-mapped mode is used exclusively to access the device's
 *	shared-memory structure, the Control/Status Registers (CSR).  All
 *	setup, configuration, and control of the device, including queuing
 *	of Tx, Rx, and configuration commands, is done through the CSR.
 *	cmd_lock serializes accesses to the CSR command register.  cb_lock
 *	protects the shared Command Block List (CBL).
 *
 *	8255x is highly MII-compliant and all accesses to the PHY go
 *	through the Management Data Interface (MDI).  Consequently, the
 *	driver leverages the mii.c library shared with other MII-compliant
 *	devices.
 *
 *	Big- and Little-Endian byte order as well as 32- and 64-bit
 *	archs are supported.  Weak-ordered memory and non-cache-coherent
 *	archs are supported.
 *
 *	III. Transmit
 *
 *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 *	together in a fixed-size ring (CBL) thus forming the flexible mode
 *	memory structure.  A TCB marked with the suspend-bit indicates
 *	the end of the ring.  The last TCB processed suspends the
 *	controller, and the controller can be restarted by issuing a CU
 *	resume command to continue from the suspend point, or a CU start
 *	command to start at a given position in the ring.
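 *
 *	(Editorial sketch, mirroring e100_exec_cb() below: with the ring
 *	... -> [TCB a] -> [TCB b,S], queuing TCB c sets the suspend bit
 *	on c first and only then clears it on b, so the controller never
 *	sees an unterminated list between the two writes.)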
 *
 *	Non-Tx commands (config, multicast setup, etc) are linked
 *	into the CBL ring along with Tx commands.  The common structure
 *	used for both Tx and non-Tx commands is the Command Block (CB).
 *
 *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
 *	is the next CB to check for completion; cb_to_send is the first
 *	CB to start on in case of a previous failure to resume.  CB clean
 *	up happens in interrupt context in response to a CU interrupt.
 *	cbs_avail keeps track of the number of free CB resources available.
 *
 *	Hardware padding of short packets to the minimum packet size is
 *	enabled.  82557 pads with 7Eh, while the later controllers pad
 *	with 00h.
 *
 *	IV.  Receive
 *
 *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
 *	Descriptors (RFD) + data buffer, thus forming the simplified mode
 *	memory structure.  Rx skbs are allocated to contain both the RFD
 *	and the data buffer, but the RFD is pulled off before the skb is
 *	indicated.  The data buffer is aligned such that encapsulated
 *	protocol headers are u32-aligned.  Since the RFD is part of the
 *	mapped shared memory, and completion status is contained within
 *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
 *	view from software and hardware.
 *
 *	In order to keep updates to the RFD link field from colliding with
 *	hardware writes to mark packets complete, we use the feature that
 *	hardware will not write to a size 0 descriptor and will instead mark
 *	the previous packet as end-of-list (EL).  After updating the link, we
 *	remove EL and only then restore the size such that hardware may use
 *	the previous-to-end RFD.
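 *
 *	(Editorial restatement of that ordering: 1) point the link at the
 *	new RFD, 2) clear EL, 3) restore the non-zero size; only after
 *	step 3 may hardware advance into the previous-to-end RFD.)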
 *
 *	Under typical operation, the receive unit (RU) is started once,
 *	and the controller happily fills RFDs as frames arrive.  If
 *	replacement RFDs cannot be allocated, or the RU goes non-active,
 *	the RU must be restarted.  Frame arrival generates an interrupt,
 *	and Rx indication and re-allocation happen in the same context,
 *	therefore no locking is required.  A software-generated interrupt
 *	is generated from the watchdog to recover from a failed allocation
 *	scenario where all Rx resources have been indicated and none
 *	replaced.
 *
 *	V.   Miscellaneous
 *
 *	VLAN offloading of tagging, stripping and filtering is not
 *	supported, but the driver will accommodate the extra 4-byte VLAN tag
 *	for processing by upper layers.  Tx/Rx Checksum offloading is not
 *	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
 *	not supported (hardware limitation).
 *
 *	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 *	Thanks to JC (jchapman@katalix.com) for helping with
 *	testing/troubleshooting the development driver.
 *
 *	TODO:
 *	o several entry points race with dev->close
 *	o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 *	FIXES:
 *	2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 *	- Stratus87247: protect MDI control register manipulations
 *	2009/06/01 - Andreas Mohr <andi at lisas dot de>
 *	- add clean lowlevel I/O emulation for cards with MII-lacking PHYs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
#include <asm/unaligned.h>


#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
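
/* Illustrative usage (editorial note, not from the original source):
 *
 *	modprobe e100 debug=16 use_io=1 eeprom_bad_csum_allow=1
 *
 * would select maximum debug verbosity, force port I/O instead of
 * memory-mapped access, and tolerate a corrupted EEPROM checksum. */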

#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static const struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};
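
/* Editorial note: struct csr mirrors the device's register layout rather
 * than holding driver state; nic->csr points at the mapped device registers
 * and every access goes through the ioread/iowrite helpers used below. */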

enum scb_status {
	rus_no_res       = 0x08,
	rus_ready        = 0x10,
	rus_mask         = 0x3C,
};

enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING   = 1,
	RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};
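
/* Editorial note: op_ewen/op_ewds are the serial EEPROM erase/write
 * enable/disable opcodes; they bracket op_write in the three-command
 * sequence bit-banged out by e100_eeprom_write() below. */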

enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

/**
 * cb_command - Command Block flags
 * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
 */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_tx_nc  = 0x0010,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};

struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
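/* Editorial example: X() keeps a pair of bitfields in hardware order on
 * either host flavor, so
 *
 *	u8 X(byte_count:6, pad0:2);
 *
 * expands to "u8 byte_count:6, pad0:2;" with little-endian bitfields and
 * to "u8 pad0:2, byte_count:6;" with big-endian bitfields, leaving
 * byte_count in the same physical bits of config byte 0 either way. */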
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;
	__le16 eeprom[256];
	spinlock_t mdio_lock;
	const struct firmware *fw;
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for (j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for (i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
};
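
/* Editorial note: each of the three commands above is clocked out MSB-first
 * over 32 EESK pulses; the middle (write) command packs opcode+address into
 * its upper 16 bits and the data word into its lower 16 bits. */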

/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for (i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if (!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
};

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
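	/* Editorial example: if the first eeprom_wc - 1 words sum to 0x1234,
	 * the stored last word must be 0xBABA - 0x1234 = 0xA886 so that the
	 * sum over all words wraps around to 0xBABA. */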
aaf918ba 804 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
fa05e1ad 805 netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
8fb6f732
DM
806 if (!eeprom_bad_csum_allow)
807 return -EAGAIN;
1da177e4
LT
808 }
809
810 return 0;
811}
812
813/* Save (portion of) driver EEPROM cache to device and update checksum */
814static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
815{
816 u16 addr, addr_len = 8, checksum = 0;
817
818 /* Try reading with an 8-bit addr len to discover actual addr len */
819 e100_eeprom_read(nic, &addr_len, 0);
820 nic->eeprom_wc = 1 << addr_len;
821
f26251eb 822 if (start + count >= nic->eeprom_wc)
1da177e4
LT
823 return -EINVAL;
824
f26251eb 825 for (addr = start; addr < start + count; addr++)
1da177e4
LT
826 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
827
828 /* The checksum, stored in the last word, is calculated such that
829 * the sum of words should be 0xBABA */
f26251eb 830 for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
aaf918ba
AV
831 checksum += le16_to_cpu(nic->eeprom[addr]);
832 nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
1da177e4
LT
833 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
834 nic->eeprom[nic->eeprom_wc - 1]);
835
836 return 0;
837}
838
962082b6 839#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
e6280f26 840#define E100_WAIT_SCB_FAST 20 /* delay like the old code */
858119e1 841static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
1da177e4
LT
842{
843 unsigned long flags;
844 unsigned int i;
845 int err = 0;
846
847 spin_lock_irqsave(&nic->cmd_lock, flags);
848
849 /* Previous command is accepted when SCB clears */
f26251eb
BA
850 for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
851 if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
1da177e4
LT
852 break;
853 cpu_relax();
f26251eb 854 if (unlikely(i > E100_WAIT_SCB_FAST))
1da177e4
LT
855 udelay(5);
856 }
f26251eb 857 if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
1da177e4
LT
858 err = -EAGAIN;
859 goto err_unlock;
860 }
861
f26251eb 862 if (unlikely(cmd != cuc_resume))
27345bb6
JB
863 iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
864 iowrite8(cmd, &nic->csr->scb.cmd_lo);
1da177e4
LT
865
866err_unlock:
867 spin_unlock_irqrestore(&nic->cmd_lock, flags);
868
869 return err;
870}
871
858119e1 872static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
61a0f6ef 873 int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
1da177e4
LT
874{
875 struct cb *cb;
876 unsigned long flags;
ac7c1c5a 877 int err;
1da177e4
LT
878
879 spin_lock_irqsave(&nic->cb_lock, flags);
880
f26251eb 881 if (unlikely(!nic->cbs_avail)) {
1da177e4
LT
882 err = -ENOMEM;
883 goto err_unlock;
884 }
885
886 cb = nic->cb_to_use;
887 nic->cb_to_use = cb->next;
888 nic->cbs_avail--;
889 cb->skb = skb;
890
61a0f6ef
NH
891 err = cb_prepare(nic, cb, skb);
892 if (err)
893 goto err_unlock;
894
f26251eb 895 if (unlikely(!nic->cbs_avail))
1da177e4
LT
896 err = -ENOSPC;
897
1da177e4
LT
898
899 /* Order is important otherwise we'll be in a race with h/w:
900 * set S-bit in current first, then clear S-bit in previous. */
901 cb->command |= cpu_to_le16(cb_s);
c335869f 902 dma_wmb();
1da177e4
LT
903 cb->prev->command &= cpu_to_le16(~cb_s);
904
f26251eb
BA
905 while (nic->cb_to_send != nic->cb_to_use) {
906 if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
1da177e4
LT
907 nic->cb_to_send->dma_addr))) {
908 /* Ok, here's where things get sticky. It's
909 * possible that we can't schedule the command
910 * because the controller is too busy, so
911 * let's just queue the command and try again
912 * when another command is scheduled. */
f26251eb 913 if (err == -ENOSPC) {
962082b6
MC
914 //request a reset
915 schedule_work(&nic->tx_timeout_task);
916 }
1da177e4
LT
917 break;
918 } else {
919 nic->cuc_cmd = cuc_resume;
920 nic->cb_to_send = nic->cb_to_send->next;
921 }
922 }
923
924err_unlock:
925 spin_unlock_irqrestore(&nic->cb_lock, flags);
926
927 return err;
928}
929
72001762
AM
930static int mdio_read(struct net_device *netdev, int addr, int reg)
931{
932 struct nic *nic = netdev_priv(netdev);
933 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
934}
935
936static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
937{
938 struct nic *nic = netdev_priv(netdev);
939
940 nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
941}
942
943/* the standard mdio_ctrl() function for usual MII-compliant hardware */
944static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
1da177e4
LT
945{
946 u32 data_out = 0;
947 unsigned int i;
ac7c6669 948 unsigned long flags;
1da177e4 949
ac7c6669
OM
950
951 /*
952 * Stratus87247: we shouldn't be writing the MDI control
953 * register until the Ready bit shows True. Also, since
954 * manipulation of the MDI control registers is a multi-step
955 * procedure it should be done under lock.
956 */
957 spin_lock_irqsave(&nic->mdio_lock, flags);
958 for (i = 100; i; --i) {
27345bb6 959 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
ac7c6669
OM
960 break;
961 udelay(20);
962 }
963 if (unlikely(!i)) {
fa05e1ad 964 netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
ac7c6669
OM
965 spin_unlock_irqrestore(&nic->mdio_lock, flags);
966 return 0; /* No way to indicate timeout error */
967 }
27345bb6 968 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
1da177e4 969
ac7c6669 970 for (i = 0; i < 100; i++) {
1da177e4 971 udelay(20);
27345bb6 972 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
1da177e4
LT
973 break;
974 }
ac7c6669 975 spin_unlock_irqrestore(&nic->mdio_lock, flags);
fa05e1ad
JP
976 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
977 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
978 dir == mdi_read ? "READ" : "WRITE",
979 addr, reg, data, data_out);
1da177e4
LT
980 return (u16)data_out;
981}
982
72001762
AM
983/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
984static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
985 u32 addr,
986 u32 dir,
987 u32 reg,
988 u16 data)
989{
990 if ((reg == MII_BMCR) && (dir == mdi_write)) {
991 if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
992 u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
993 MII_ADVERTISE);
994
995 /*
996 * Workaround Si issue where sometimes the part will not
997 * autoneg to 100Mbps even when advertised.
998 */
999 if (advert & ADVERTISE_100FULL)
1000 data |= BMCR_SPEED100 | BMCR_FULLDPLX;
1001 else if (advert & ADVERTISE_100HALF)
1002 data |= BMCR_SPEED100;
1003 }
1004 }
1005 return mdio_ctrl_hw(nic, addr, dir, reg, data);
1da177e4
LT
1006}
1007
72001762
AM
1008/* Fully software-emulated mdio_ctrl() function for cards without
1009 * MII-compliant PHYs.
1010 * For now, this is mainly geared towards 80c24 support; in case of further
1011 * requirements for other types (i82503, ...?) either extend this mechanism
1012 * or split it, whichever is cleaner.
1013 */
1014static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
1015 u32 addr,
1016 u32 dir,
1017 u32 reg,
1018 u16 data)
1019{
1020 /* might need to allocate a netdev_priv'ed register array eventually
1021 * to be able to record state changes, but for now
1022 * some fully hardcoded register handling ought to be ok I guess. */
1023
1024 if (dir == mdi_read) {
1025 switch (reg) {
1026 case MII_BMCR:
1027 /* Auto-negotiation, right? */
1028 return BMCR_ANENABLE |
1029 BMCR_FULLDPLX;
1030 case MII_BMSR:
1031 return BMSR_LSTATUS /* for mii_link_ok() */ |
1032 BMSR_ANEGCAPABLE |
1033 BMSR_10FULL;
1034 case MII_ADVERTISE:
1035 /* 80c24 is a "combo card" PHY, right? */
1036 return ADVERTISE_10HALF |
1037 ADVERTISE_10FULL;
1038 default:
fa05e1ad
JP
1039 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1040 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1041 dir == mdi_read ? "READ" : "WRITE",
1042 addr, reg, data);
72001762
AM
1043 return 0xFFFF;
1044 }
1045 } else {
1046 switch (reg) {
1047 default:
fa05e1ad
JP
1048 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1049 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1050 dir == mdi_read ? "READ" : "WRITE",
1051 addr, reg, data);
72001762
AM
1052 return 0xFFFF;
1053 }
b55de80e 1054 }
72001762
AM
1055}
1056static inline int e100_phy_supports_mii(struct nic *nic)
1057{
1058 /* for now, just check it by comparing whether we
1059 are using MII software emulation.
1060 */
1061 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
1da177e4
LT
1062}
1063
1064static void e100_get_defaults(struct nic *nic)
1065{
2afecc04
JB
1066 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
1067 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
1da177e4 1068
1da177e4 1069 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
44c10138 1070 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
f26251eb 1071 if (nic->mac == mac_unknown)
1da177e4
LT
1072 nic->mac = mac_82557_D100_A;
1073
1074 nic->params.rfds = rfds;
1075 nic->params.cbs = cbs;
1076
1077 /* Quadwords to DMA into FIFO before starting frame transmit */
1078 nic->tx_threshold = 0xE0;
1079
0a0863af 1080 /* no interrupt for every tx completion, delay = 256us if not 557 */
962082b6
MC
1081 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
1082 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
1da177e4
LT
1083
1084 /* Template for a freshly allocated RFD */
7734f6e6 1085 nic->blank_rfd.command = 0;
1172899a 1086 nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
719cdac5 1087 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
1da177e4
LT
1088
1089 /* MII setup */
1090 nic->mii.phy_id_mask = 0x1F;
1091 nic->mii.reg_num_mask = 0x1F;
1092 nic->mii.dev = nic->netdev;
1093 nic->mii.mdio_read = mdio_read;
1094 nic->mii.mdio_write = mdio_write;
1095}
1096
61a0f6ef 1097static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1da177e4
LT
1098{
1099 struct config *config = &cb->u.config;
1100 u8 *c = (u8 *)config;
719cdac5 1101 struct net_device *netdev = nic->netdev;
1da177e4
LT
1102
1103 cb->command = cpu_to_le16(cb_config);
1104
1105 memset(config, 0, sizeof(struct config));
1106
1107 config->byte_count = 0x16; /* bytes in this struct */
1108 config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */
1109 config->direct_rx_dma = 0x1; /* reserved */
1110 config->standard_tcb = 0x1; /* 1=standard, 0=extended */
1111 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
1112 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
1113 config->tx_underrun_retry = 0x3; /* # of underrun retries */
72001762
AM
1114 if (e100_phy_supports_mii(nic))
1115 config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */
1da177e4
LT
1116 config->pad10 = 0x6;
1117 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
1118 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
1119 config->ifs = 0x6; /* x16 = inter frame spacing */
1120 config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */
1121 config->pad15_1 = 0x1;
1122 config->pad15_2 = 0x1;
1123 config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */
1124 config->fc_delay_hi = 0x40; /* time delay for fc frame */
1125 config->tx_padding = 0x1; /* 1=pad short frames */
1126 config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */
1127 config->pad18 = 0x1;
1128 config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */
1129 config->pad20_1 = 0x1F;
1130 config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */
1131 config->pad21_1 = 0x5;
1132
1133 config->adaptive_ifs = nic->adaptive_ifs;
1134 config->loopback = nic->loopback;
1135
f26251eb 1136 if (nic->mii.force_media && nic->mii.full_duplex)
1da177e4
LT
1137 config->full_duplex_force = 0x1; /* 1=force, 0=auto */
1138
f26251eb 1139 if (nic->flags & promiscuous || nic->loopback) {
1da177e4
LT
1140 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
1141 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
1142 config->promiscuous_mode = 0x1; /* 1=on, 0=off */
1143 }
1144
719cdac5
BG
1145 if (unlikely(netdev->features & NETIF_F_RXFCS))
1146 config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */
1147
f26251eb 1148 if (nic->flags & multicast_all)
1da177e4
LT
1149 config->multicast_all = 0x1; /* 1=accept, 0=no */
1150
6bdacb1a 1151 /* disable WoL when up */
f26251eb 1152 if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
1da177e4
LT
1153 config->magic_packet_disable = 0x1; /* 1=off, 0=on */
1154
f26251eb 1155 if (nic->mac >= mac_82558_D101_A4) {
1da177e4
LT
1156 config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */
1157 config->mwi_enable = 0x1; /* 1=enable, 0=disable */
1158 config->standard_tcb = 0x0; /* 1=standard, 0=extended */
1159 config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */
44e4925e 1160 if (nic->mac >= mac_82559_D101M) {
1da177e4 1161 config->tno_intr = 0x1; /* TCO stats enable */
44e4925e
DG
1162 /* Enable TCO in extended config */
1163 if (nic->mac >= mac_82551_10) {
1164 config->byte_count = 0x20; /* extended bytes */
1165 config->rx_d102_mode = 0x1; /* GMRC for TCO */
1166 }
1167 } else {
1da177e4 1168 config->standard_stat_counter = 0x0;
44e4925e 1169 }
1da177e4
LT
1170 }
1171
0bf61e66
BG
1172 if (netdev->features & NETIF_F_RXALL) {
1173 config->rx_save_overruns = 0x1; /* 1=save, 0=discard */
1174 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
1175 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
1176 }
1177
ab90695a
AS
1178 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
1179 c + 0);
1180 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
1181 c + 8);
1182 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
1183 c + 16);
61a0f6ef 1184 return 0;
1da177e4
LT
1185}
1186
2afecc04
JB
1187/*************************************************************************
1188* CPUSaver parameters
1189*
1190* All CPUSaver parameters are 16-bit literals that are part of a
1191* "move immediate value" instruction. By changing the value of
1192* the literal in the instruction before the code is loaded, the
1193* driver can change the algorithm.
1194*
0779bf2d 1195* INTDELAY - This loads the dead-man timer with its initial value.
05479938 1196* When this timer expires the interrupt is asserted, and the
2afecc04
JB
1197* timer is reset each time a new packet is received. (see
1198* BUNDLEMAX below to set the limit on number of chained packets)
1199* The current default is 0x600 or 1536. Experiments show that
1200* the value should probably stay within the 0x200 - 0x1000.
1201*
05479938 1202* BUNDLEMAX -
2afecc04
JB
1203* This sets the maximum number of frames that will be bundled. In
1204* some situations, such as the TCP windowing algorithm, it may be
1205* better to limit the growth of the bundle size than let it go as
1206* high as it can, because that could cause too much added latency.
1207* The default is six, because this is the number of packets in the
1208* default TCP window size. A value of 1 would make CPUSaver indicate
1209* an interrupt for every frame received. If you do not want to put
1210* a limit on the bundle size, set this value to xFFFF.
1211*
05479938 1212* BUNDLESMALL -
2afecc04
JB
1213* This contains a bit-mask describing the minimum size frame that
1214* will be bundled. The default masks the lower 7 bits, which means
1215* that any frame less than 128 bytes in length will not be bundled,
1216* but will instead immediately generate an interrupt. This does
1217* not affect the current bundle in any way. Any frame that is 128
1218* bytes or large will be bundled normally. This feature is meant
1219* to provide immediate indication of ACK frames in a TCP environment.
1220* Customers were seeing poor performance when a machine with CPUSaver
1221* enabled was sending but not receiving. The delay introduced when
1222* the ACKs were received was enough to reduce total throughput, because
1223* the sender would sit idle until the ACK was finally seen.
1224*
1225* The current default is 0xFF80, which masks out the lower 7 bits.
1226* This means that any frame which is x7F (127) bytes or smaller
05479938 1227* will cause an immediate interrupt. Because this value must be a
2afecc04
JB
1228* bit mask, there are only a few valid values that can be used. To
1229* turn this feature off, the driver can write the value xFFFF to the
1230* lower word of this instruction (in the same way that the other
1231* parameters are used). Likewise, a value of 0xF800 (2047) would
1232* cause an interrupt to be generated for every frame, because all
1233* standard Ethernet frames are <= 2047 bytes in length.
1234*************************************************************************/
1235
05479938 1236/* if you wish to disable the ucode functionality, while maintaining the
2afecc04
JB
1237 * workarounds it provides, set the following defines to:
1238 * BUNDLESMALL 0
1239 * BUNDLEMAX 1
1240 * INTDELAY 1
1241 */
1242#define BUNDLESMALL 1
1243#define BUNDLEMAX (u16)6
1244#define INTDELAY (u16)1536 /* 0x600 */
1245
9ac32e1b
JSR
1246/* Initialize firmware */
1247static const struct firmware *e100_request_firmware(struct nic *nic)
1248{
1249 const char *fw_name;
7e15b0c9 1250 const struct firmware *fw = nic->fw;
9ac32e1b 1251 u8 timer, bundle, min_size;
7e15b0c9 1252 int err = 0;
8b0d2f9e 1253 bool required = false;
9ac32e1b 1254
2afecc04
JB
1255 /* do not load u-code for ICH devices */
1256 if (nic->flags & ich)
9ac32e1b 1257 return NULL;
2afecc04 1258
8b0d2f9e
BM
1259 /* Search for ucode match against h/w revision
1260 *
1261 * Based on comments in the source code for the FreeBSD fxp
1262 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
1263 *
1264 * "fixes for bugs in the B-step hardware (specifically, bugs
1265 * with Inline Receive)."
1266 *
1267 * So we must fail if it cannot be loaded.
1268 *
1269 * The other microcode files are only required for the optional
1270 * CPUSaver feature. Nice to have, but no reason to fail.
1271 */
1272 if (nic->mac == mac_82559_D101M) {
9ac32e1b 1273 fw_name = FIRMWARE_D101M;
8b0d2f9e 1274 } else if (nic->mac == mac_82559_D101S) {
9ac32e1b 1275 fw_name = FIRMWARE_D101S;
8b0d2f9e 1276 } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
9ac32e1b 1277 fw_name = FIRMWARE_D102E;
8b0d2f9e
BM
1278 required = true;
1279 } else { /* No ucode on other devices */
9ac32e1b 1280 return NULL;
8b0d2f9e 1281 }
9ac32e1b 1282
7e15b0c9
DG
1283 /* If the firmware has not previously been loaded, request a pointer
1284 * to it. If it was previously loaded, we are reinitializing the
1285 * adapter, possibly in a resume from hibernate, in which case
1286 * request_firmware() cannot be used.
1287 */
1288 if (!fw)
1289 err = request_firmware(&fw, fw_name, &nic->pdev->dev);
1290
9ac32e1b 1291 if (err) {
8b0d2f9e
BM
1292 if (required) {
1293 netif_err(nic, probe, nic->netdev,
1294 "Failed to load firmware \"%s\": %d\n",
1295 fw_name, err);
1296 return ERR_PTR(err);
1297 } else {
1298 netif_info(nic, probe, nic->netdev,
1299 "CPUSaver disabled. Needs \"%s\": %d\n",
1300 fw_name, err);
1301 return NULL;
1302 }
9ac32e1b 1303 }
7e15b0c9 1304
9ac32e1b
JSR
1305 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
1306 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
1307 if (fw->size != UCODE_SIZE * 4 + 3) {
fa05e1ad
JP
1308 netif_err(nic, probe, nic->netdev,
1309 "Firmware \"%s\" has wrong size %zu\n",
1310 fw_name, fw->size);
9ac32e1b
JSR
1311 release_firmware(fw);
1312 return ERR_PTR(-EINVAL);
2afecc04
JB
1313 }
1314
9ac32e1b
JSR
1315 /* Read timer, bundle and min_size from end of firmware blob */
1316 timer = fw->data[UCODE_SIZE * 4];
1317 bundle = fw->data[UCODE_SIZE * 4 + 1];
1318 min_size = fw->data[UCODE_SIZE * 4 + 2];
1319
1320 if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
1321 min_size >= UCODE_SIZE) {
fa05e1ad
JP
1322 netif_err(nic, probe, nic->netdev,
1323 "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
1324 fw_name, timer, bundle, min_size);
9ac32e1b
JSR
1325 release_firmware(fw);
1326 return ERR_PTR(-EINVAL);
1327 }
7e15b0c9
DG
1328
1329 /* OK, firmware is validated and ready to use. Save a pointer
1330 * to it in the nic */
1331 nic->fw = fw;
9ac32e1b 1332 return fw;
24180333
JB
1333}
1334
61a0f6ef 1335static int e100_setup_ucode(struct nic *nic, struct cb *cb,
9ac32e1b 1336 struct sk_buff *skb)
24180333 1337{
9ac32e1b
JSR
1338 const struct firmware *fw = (void *)skb;
1339 u8 timer, bundle, min_size;
1340
1341 /* It's not a real skb; we just abused the fact that e100_exec_cb
1342 will pass it through to here... */
1343 cb->skb = NULL;
1344
1345 /* firmware is stored as little endian already */
1346 memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
1347
1348 /* Read timer, bundle and min_size from end of firmware blob */
1349 timer = fw->data[UCODE_SIZE * 4];
1350 bundle = fw->data[UCODE_SIZE * 4 + 1];
1351 min_size = fw->data[UCODE_SIZE * 4 + 2];
1352
1353 /* Insert user-tunable settings in cb->u.ucode */
1354 cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
1355 cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
1356 cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
1357 cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
1358 cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
1359 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
1360
1361 cb->command = cpu_to_le16(cb_ucode | cb_el);
61a0f6ef 1362 return 0;
9ac32e1b
JSR
1363}
1364
1365static inline int e100_load_ucode_wait(struct nic *nic)
1366{
1367 const struct firmware *fw;
24180333
JB
1368 int err = 0, counter = 50;
1369 struct cb *cb = nic->cb_to_clean;
1370
9ac32e1b
JSR
1371 fw = e100_request_firmware(nic);
1372 /* If it's NULL, then no ucode is required */
1373 if (!fw || IS_ERR(fw))
1374 return PTR_ERR(fw);
1375
1376 if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
fa05e1ad
JP
1377 netif_err(nic, probe, nic->netdev,
1378 "ucode cmd failed with error %d\n", err);
05479938 1379
24180333
JB
1380 /* must restart cuc */
1381 nic->cuc_cmd = cuc_start;
1382
1383 /* wait for completion */
1384 e100_write_flush(nic);
1385 udelay(10);
1386
1387 /* wait for possibly (ouch) 500ms */
1388 while (!(cb->status & cpu_to_le16(cb_complete))) {
1389 msleep(10);
1390 if (!--counter) break;
1391 }
05479938 1392
3a4fa0a2 1393 /* ack any interrupts, something could have been set */
27345bb6 1394 iowrite8(~0, &nic->csr->scb.stat_ack);
24180333
JB
1395
1396 /* if the command failed, or is not OK, notify and return */
1397 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
fa05e1ad 1398 netif_err(nic, probe, nic->netdev, "ucode load failed\n");
24180333
JB
1399 err = -EPERM;
1400 }
05479938 1401
24180333 1402 return err;
1da177e4
LT
1403}
1404
61a0f6ef 1405static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1da177e4
LT
1406 struct sk_buff *skb)
1407{
1408 cb->command = cpu_to_le16(cb_iaaddr);
1409 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
61a0f6ef 1410 return 0;
1da177e4
LT
1411}
1412
61a0f6ef 1413static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1da177e4
LT
1414{
1415 cb->command = cpu_to_le16(cb_dump);
1416 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1417 offsetof(struct mem, dump_buf));
61a0f6ef 1418 return 0;
1da177e4
LT
1419}
1420
72001762
AM
1421static int e100_phy_check_without_mii(struct nic *nic)
1422{
1423 u8 phy_type;
1424 int without_mii;
1425
1426 phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
1427
1428 switch (phy_type) {
1429 case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
1430 case I82503: /* Non-MII PHY; UNTESTED! */
1431 case S80C24: /* Non-MII PHY; tested and working */
1432 /* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
1433 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
1434 * doesn't have a programming interface of any sort. The
1435 * media is sensed automatically based on how the link partner
1436 * is configured. This is, in essence, manual configuration.
1437 */
fa05e1ad
JP
1438 netif_info(nic, probe, nic->netdev,
1439 "found MII-less i82503 or 80c24 or other PHY\n");
72001762
AM
1440
1441 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
1442 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
1443
1444 /* these might be needed for certain MII-less cards...
1445 * nic->flags |= ich;
1446 * nic->flags |= ich_10h_workaround; */
1447
1448 without_mii = 1;
1449 break;
1450 default:
1451 without_mii = 0;
1452 break;
1453 }
1454 return without_mii;
1455}
1456
1da177e4
LT
1457#define NCONFIG_AUTO_SWITCH 0x0080
1458#define MII_NSC_CONG MII_RESV1
1459#define NSC_CONG_ENABLE 0x0100
1460#define NSC_CONG_TXREADY 0x0400
1461#define ADVERTISE_FC_SUPPORTED 0x0400
1462static int e100_phy_init(struct nic *nic)
1463{
1464 struct net_device *netdev = nic->netdev;
1465 u32 addr;
1466 u16 bmcr, stat, id_lo, id_hi, cong;
1467
1468 /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
f26251eb 1469 for (addr = 0; addr < 32; addr++) {
1da177e4
LT
1470 nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
1471 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1472 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1473 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
f26251eb 1474 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
1da177e4
LT
1475 break;
1476 }
72001762
AM
1477 if (addr == 32) {
1478 /* uhoh, no PHY detected: check whether we seem to be some
1479 * weird, rare variant which is *known* to not have any MII.
1480 * But do this AFTER MII checking only, since this does
1481 * lookup of EEPROM values which may easily be unreliable. */
1482 if (e100_phy_check_without_mii(nic))
1483 return 0; /* simply return and hope for the best */
1484 else {
1485 /* for unknown cases log a fatal error */
fa05e1ad
JP
1486 netif_err(nic, hw, nic->netdev,
1487 "Failed to locate any known PHY, aborting\n");
72001762
AM
1488 return -EAGAIN;
1489 }
1490 } else
fa05e1ad
JP
1491 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1492 "phy_addr = %d\n", nic->mii.phy_id);
1da177e4 1493
1da177e4
LT
1494 /* Get phy ID */
1495 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
1496 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
1497 nic->phy = (u32)id_hi << 16 | (u32)id_lo;
fa05e1ad
JP
1498 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1499 "phy ID = 0x%08X\n", nic->phy);
1da177e4 1500
8fbd962e
BA
1501 /* Select the phy and isolate the rest */
1502 for (addr = 0; addr < 32; addr++) {
1503 if (addr != nic->mii.phy_id) {
1504 mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
1505 } else if (nic->phy != phy_82552_v) {
1506 bmcr = mdio_read(netdev, addr, MII_BMCR);
1507 mdio_write(netdev, addr, MII_BMCR,
1508 bmcr & ~BMCR_ISOLATE);
1509 }
1510 }
1511 /*
1512 * Workaround for 82552:
1513 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
1514 * other phy_id's) using bmcr value from addr discovery loop above.
1515 */
1516 if (nic->phy == phy_82552_v)
1517 mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
1518 bmcr & ~BMCR_ISOLATE);
1519
1da177e4
LT
1520 /* Handle National tx phys */
1521#define NCS_PHY_MODEL_MASK 0xFFF0FFFF
f26251eb 1522 if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
1da177e4
LT
1523 /* Disable congestion control */
1524 cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
1525 cong |= NSC_CONG_TXREADY;
1526 cong &= ~NSC_CONG_ENABLE;
1527 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
1528 }
1529
b55de80e
BA
1530 if (nic->phy == phy_82552_v) {
1531 u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
1532
72001762
AM
1533 /* assign special tweaked mdio_ctrl() function */
1534 nic->mdio_ctrl = mdio_ctrl_phy_82552_v;
1535
b55de80e
BA
1536 /* Workaround Si not advertising flow-control during autoneg */
1537 advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1538 mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
1539
1540 /* Reset for the above changes to take effect */
1541 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1542 bmcr |= BMCR_RESET;
1543 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
1544 } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
60ffa478 1545 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
a3566b52 1546 (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
60ffa478
JK
1547 /* enable/disable MDI/MDI-X auto-switching. */
1548 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
1549 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
64895145 1550 }
1da177e4
LT
1551
1552 return 0;
1553}
1554
1555static int e100_hw_init(struct nic *nic)
1556{
dca97ad2 1557 int err = 0;
1da177e4
LT
1558
1559 e100_hw_reset(nic);
1560
fa05e1ad 1561 netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
f26251eb 1562 if (!in_interrupt() && (err = e100_self_test(nic)))
1da177e4
LT
1563 return err;
1564
f26251eb 1565 if ((err = e100_phy_init(nic)))
1da177e4 1566 return err;
f26251eb 1567 if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
1da177e4 1568 return err;
f26251eb 1569 if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
1da177e4 1570 return err;
9ac32e1b 1571 if ((err = e100_load_ucode_wait(nic)))
1da177e4 1572 return err;
f26251eb 1573 if ((err = e100_exec_cb(nic, NULL, e100_configure)))
1da177e4 1574 return err;
f26251eb 1575 if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
1da177e4 1576 return err;
f26251eb 1577 if ((err = e100_exec_cmd(nic, cuc_dump_addr,
1da177e4
LT
1578 nic->dma_addr + offsetof(struct mem, stats))))
1579 return err;
f26251eb 1580 if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
1da177e4
LT
1581 return err;
1582
1583 e100_disable_irq(nic);
1584
1585 return 0;
1586}
1587
61a0f6ef 1588static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1da177e4
LT
1589{
1590 struct net_device *netdev = nic->netdev;
22bedad3 1591 struct netdev_hw_addr *ha;
4cd24eaf 1592 u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
1da177e4
LT
1593
1594 cb->command = cpu_to_le16(cb_multi);
1595 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
48e2f183 1596 i = 0;
22bedad3 1597 netdev_for_each_mc_addr(ha, netdev) {
48e2f183
JP
1598 if (i == count)
1599 break;
22bedad3 1600 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
1da177e4 1601 ETH_ALEN);
48e2f183 1602 }
61a0f6ef 1603 return 0;
1da177e4
LT
1604}
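
/* cb_multi carries a byte count followed by a packed array of 6-byte
 * addresses. Anything beyond E100_MAX_MULTICAST_ADDRS is dropped
 * here; e100_set_multicast_list() covers that case by switching the
 * device to multicast_all instead. */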
1605
1606static void e100_set_multicast_list(struct net_device *netdev)
1607{
1608 struct nic *nic = netdev_priv(netdev);
1609
fa05e1ad
JP
1610 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1611 "mc_count=%d, flags=0x%04X\n",
1612 netdev_mc_count(netdev), netdev->flags);
1da177e4 1613
f26251eb 1614 if (netdev->flags & IFF_PROMISC)
1da177e4
LT
1615 nic->flags |= promiscuous;
1616 else
1617 nic->flags &= ~promiscuous;
1618
f26251eb 1619 if (netdev->flags & IFF_ALLMULTI ||
4cd24eaf 1620 netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
1da177e4
LT
1621 nic->flags |= multicast_all;
1622 else
1623 nic->flags &= ~multicast_all;
1624
1625 e100_exec_cb(nic, NULL, e100_configure);
1626 e100_exec_cb(nic, NULL, e100_multi);
1627}
1628
1629static void e100_update_stats(struct nic *nic)
1630{
09f75cd7
JG
1631 struct net_device *dev = nic->netdev;
1632 struct net_device_stats *ns = &dev->stats;
1da177e4 1633 struct stats *s = &nic->mem->stats;
aaf918ba
AV
1634 __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
1635 (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
1da177e4
LT
1636 &s->complete;
1637
1638 /* Device's stats reporting may take several microseconds to
0a0863af 1639 * complete, so we're always waiting for results of the
1da177e4
LT
1640 * previous command. */
1641
f26251eb 1642 if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
1da177e4
LT
1643 *complete = 0;
1644 nic->tx_frames = le32_to_cpu(s->tx_good_frames);
1645 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
1646 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
1647 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
1648 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
1649 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
1650 ns->collisions += nic->tx_collisions;
1651 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1652 le32_to_cpu(s->tx_lost_crs);
d24d65ed
BG
1653 nic->rx_short_frame_errors +=
1654 le32_to_cpu(s->rx_short_frame_errors);
1655 ns->rx_length_errors = nic->rx_short_frame_errors +
1da177e4
LT
1656 nic->rx_over_length_errors;
1657 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1658 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1659 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1660 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
ecf7130b 1661 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
1da177e4
LT
1662 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1663 le32_to_cpu(s->rx_alignment_errors) +
1664 le32_to_cpu(s->rx_short_frame_errors) +
1665 le32_to_cpu(s->rx_cdt_errors);
1666 nic->tx_deferred += le32_to_cpu(s->tx_deferred);
1667 nic->tx_single_collisions +=
1668 le32_to_cpu(s->tx_single_collisions);
1669 nic->tx_multiple_collisions +=
1670 le32_to_cpu(s->tx_multiple_collisions);
f26251eb 1671 if (nic->mac >= mac_82558_D101_A4) {
1da177e4
LT
1672 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
1673 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
1674 nic->rx_fc_unsupported +=
1675 le32_to_cpu(s->fc_rcv_unsupported);
f26251eb 1676 if (nic->mac >= mac_82559_D101M) {
1da177e4
LT
1677 nic->tx_tco_frames +=
1678 le16_to_cpu(s->xmt_tco_frames);
1679 nic->rx_tco_frames +=
1680 le16_to_cpu(s->rcv_tco_frames);
1681 }
1682 }
1683 }
1684
05479938 1685
f26251eb 1686 if (e100_exec_cmd(nic, cuc_dump_reset, 0))
fa05e1ad
JP
1687 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1688 "exec cuc_dump_reset failed\n");
1da177e4
LT
1689}
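
/* The dump/reset handshake above is deliberately asynchronous:
 * cuc_dump_reset asks the device to write its counters to the shared
 * stats area and clear them, and the results are collected on the
 * next watchdog tick by testing the completion word, so each read
 * covers the previous interval. */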
1690
1691static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1692{
1693 /* Adjust inter-frame-spacing (IFS) between two transmits if
1694 * we're getting collisions on a half-duplex connection. */
1695
f26251eb 1696 if (duplex == DUPLEX_HALF) {
1da177e4
LT
1697 u32 prev = nic->adaptive_ifs;
1698 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1699
f26251eb 1700 if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1da177e4 1701 (nic->tx_frames > min_frames)) {
f26251eb 1702 if (nic->adaptive_ifs < 60)
1da177e4
LT
1703 nic->adaptive_ifs += 5;
1704 } else if (nic->tx_frames < min_frames) {
f26251eb 1705 if (nic->adaptive_ifs >= 5)
1da177e4
LT
1706 nic->adaptive_ifs -= 5;
1707 }
f26251eb 1708 if (nic->adaptive_ifs != prev)
1da177e4
LT
1709 e100_exec_cb(nic, NULL, e100_configure);
1710 }
1711}
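
/* Worked example of the heuristic above, using the constants from
 * the code: at 100 Mbps half-duplex, min_frames is 1000. If more
 * than 1 in 32 transmitted frames collided, the IFS is widened by 5
 * (while below 60); if traffic falls under 1000 frames per interval
 * it is narrowed by 5 again, and any change is pushed to the device
 * with a reconfigure. */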
1712
1713static void e100_watchdog(unsigned long data)
1714{
1715 struct nic *nic = (struct nic *)data;
8ae6daca 1716 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
25db0338 1717 u32 speed;
1da177e4 1718
fa05e1ad
JP
1719 netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
1720 "right now = %ld\n", jiffies);
1da177e4
LT
1721
1722 /* mii library handles link maintenance tasks */
1723
1724 mii_ethtool_gset(&nic->mii, &cmd);
25db0338 1725 speed = ethtool_cmd_speed(&cmd);
1da177e4 1726
f26251eb 1727 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
fa05e1ad 1728 netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
25db0338 1729 speed == SPEED_100 ? 100 : 10,
fa05e1ad 1730 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
f26251eb 1731 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
fa05e1ad 1732 netdev_info(nic->netdev, "NIC Link is Down\n");
1da177e4
LT
1733 }
1734
1735 mii_check_link(&nic->mii);
1736
1737 /* Software generated interrupt to recover from (rare) Rx
05479938
JB
1738 * allocation failure.
 1739 * Unfortunately we have to use a spinlock to avoid re-enabling
 1740 * interrupts accidentally, because the hardware shares a register between the
1741 * interrupt mask bit and the SW Interrupt generation bit */
1da177e4 1742 spin_lock_irq(&nic->cmd_lock);
27345bb6 1743 iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi);
1da177e4 1744 e100_write_flush(nic);
ad8c48ad 1745 spin_unlock_irq(&nic->cmd_lock);
1da177e4
LT
1746
1747 e100_update_stats(nic);
25db0338 1748 e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
1da177e4 1749
f26251eb 1750 if (nic->mac <= mac_82557_D100_C)
1da177e4
LT
 1751 /* Issue a multicast command to work around a 557 lock-up */
1752 e100_set_multicast_list(nic->netdev);
1753
25db0338 1754 if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
1da177e4
LT
1755 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1756 nic->flags |= ich_10h_workaround;
1757 else
1758 nic->flags &= ~ich_10h_workaround;
1759
34c6417b
SH
1760 mod_timer(&nic->watchdog,
1761 round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
1da177e4
LT
1762}
1763
61a0f6ef 1764static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
1da177e4
LT
1765 struct sk_buff *skb)
1766{
61a0f6ef 1767 dma_addr_t dma_addr;
1da177e4 1768 cb->command = nic->tx_command;
75f58a53 1769
61a0f6ef
NH
1770 dma_addr = pci_map_single(nic->pdev,
1771 skb->data, skb->len, PCI_DMA_TODEVICE);
1772 /* If we can't map the skb, have the upper layer try later */
5e5d4942
JJB
1773 if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
1774 dev_kfree_skb_any(skb);
1775 skb = NULL;
61a0f6ef 1776 return -ENOMEM;
5e5d4942 1777 }
61a0f6ef 1778
75f58a53
BG
1779 /*
 1780 * Use the last 4 bytes of the SKB payload as the frame CRC; used for
 1781 * testing, i.e. sending frames with a deliberately bad CRC.
1782 */
1783 if (unlikely(skb->no_fcs))
3d2372eb 1784 cb->command |= cpu_to_le16(cb_tx_nc);
75f58a53 1785 else
3d2372eb 1786 cb->command &= ~cpu_to_le16(cb_tx_nc);
75f58a53 1787
962082b6 1788 /* interrupt every 16 packets regardless of delay */
f26251eb 1789 if ((nic->cbs_avail & ~15) == nic->cbs_avail)
996ec353 1790 cb->command |= cpu_to_le16(cb_i);
1da177e4
LT
1791 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1792 cb->u.tcb.tcb_byte_count = 0;
1793 cb->u.tcb.threshold = nic->tx_threshold;
1794 cb->u.tcb.tbd_count = 1;
61a0f6ef 1795 cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
1da177e4 1796 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
48425b14 1797 skb_tx_timestamp(skb);
61a0f6ef 1798 return 0;
1da177e4
LT
1799}
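
/* Each TCB carries exactly one TBD (tbd_count = 1) pointing at the
 * linearly mapped skb; tcb_byte_count stays 0 because all data is
 * described by the TBD. The cb_i bit requests a Tx interrupt only on
 * every 16th packet to limit interrupt load. */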
1800
3b29a56d
SH
1801static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1802 struct net_device *netdev)
1da177e4
LT
1803{
1804 struct nic *nic = netdev_priv(netdev);
1805 int err;
1806
f26251eb 1807 if (nic->flags & ich_10h_workaround) {
1da177e4
LT
1808 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1809 Issue a NOP command followed by a 1us delay before
1810 issuing the Tx command. */
f26251eb 1811 if (e100_exec_cmd(nic, cuc_nop, 0))
fa05e1ad
JP
1812 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1813 "exec cuc_nop failed\n");
1da177e4
LT
1814 udelay(1);
1815 }
1816
1817 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1818
f26251eb 1819 switch (err) {
1da177e4
LT
1820 case -ENOSPC:
1821 /* We queued the skb, but now we're out of space. */
fa05e1ad
JP
1822 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1823 "No space for CB\n");
1da177e4
LT
1824 netif_stop_queue(netdev);
1825 break;
1826 case -ENOMEM:
1827 /* This is a hard error - log it. */
fa05e1ad
JP
1828 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1829 "Out of Tx resources, returning skb\n");
1da177e4 1830 netif_stop_queue(netdev);
5b548140 1831 return NETDEV_TX_BUSY;
1da177e4
LT
1832 }
1833
6ed10654 1834 return NETDEV_TX_OK;
1da177e4
LT
1835}
1836
858119e1 1837static int e100_tx_clean(struct nic *nic)
1da177e4 1838{
09f75cd7 1839 struct net_device *dev = nic->netdev;
1da177e4
LT
1840 struct cb *cb;
1841 int tx_cleaned = 0;
1842
1843 spin_lock(&nic->cb_lock);
1844
1da177e4 1845 /* Clean CBs marked complete */
f26251eb 1846 for (cb = nic->cb_to_clean;
1da177e4
LT
1847 cb->status & cpu_to_le16(cb_complete);
1848 cb = nic->cb_to_clean = cb->next) {
c335869f 1849 dma_rmb(); /* read skb after status */
fa05e1ad
JP
1850 netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
1851 "cb[%d]->status = 0x%04X\n",
 1852 (int)(((void *)cb - (void *)nic->cbs) / sizeof(struct cb)),
1853 cb->status);
dc45010e 1854
f26251eb 1855 if (likely(cb->skb != NULL)) {
09f75cd7
JG
1856 dev->stats.tx_packets++;
1857 dev->stats.tx_bytes += cb->skb->len;
1da177e4
LT
1858
1859 pci_unmap_single(nic->pdev,
1860 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1861 le16_to_cpu(cb->u.tcb.tbd.size),
1862 PCI_DMA_TODEVICE);
1863 dev_kfree_skb_any(cb->skb);
1864 cb->skb = NULL;
1865 tx_cleaned = 1;
1866 }
1867 cb->status = 0;
1868 nic->cbs_avail++;
1869 }
1870
1871 spin_unlock(&nic->cb_lock);
1872
1873 /* Recover from running out of Tx resources in xmit_frame */
f26251eb 1874 if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1da177e4
LT
1875 netif_wake_queue(nic->netdev);
1876
1877 return tx_cleaned;
1878}
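
/* Cleanup walks cb_to_clean forward under cb_lock until it reaches a
 * CB the hardware has not completed; the dma_rmb() ensures the skb
 * pointer is not read before the completed status is observed. */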
1879
1880static void e100_clean_cbs(struct nic *nic)
1881{
f26251eb
BA
1882 if (nic->cbs) {
1883 while (nic->cbs_avail != nic->params.cbs.count) {
1da177e4 1884 struct cb *cb = nic->cb_to_clean;
f26251eb 1885 if (cb->skb) {
1da177e4
LT
1886 pci_unmap_single(nic->pdev,
1887 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1888 le16_to_cpu(cb->u.tcb.tbd.size),
1889 PCI_DMA_TODEVICE);
1890 dev_kfree_skb(cb->skb);
1891 }
1892 nic->cb_to_clean = nic->cb_to_clean->next;
1893 nic->cbs_avail++;
1894 }
98468efd 1895 pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
1da177e4
LT
1896 nic->cbs = NULL;
1897 nic->cbs_avail = 0;
1898 }
1899 nic->cuc_cmd = cuc_start;
1900 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1901 nic->cbs;
1902}
1903
1904static int e100_alloc_cbs(struct nic *nic)
1905{
1906 struct cb *cb;
1907 unsigned int i, count = nic->params.cbs.count;
1908
1909 nic->cuc_cmd = cuc_start;
1910 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1911 nic->cbs_avail = 0;
1912
98468efd
RO
1913 nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
1914 &nic->cbs_dma_addr);
f26251eb 1915 if (!nic->cbs)
1da177e4 1916 return -ENOMEM;
70abc8cb 1917 memset(nic->cbs, 0, count * sizeof(struct cb));
1da177e4 1918
f26251eb 1919 for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
1da177e4
LT
1920 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1921 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1922
1923 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1924 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1925 ((i+1) % count) * sizeof(struct cb));
1da177e4
LT
1926 }
1927
1928 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1929 nic->cbs_avail = count;
1930
1931 return 0;
1932}
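
/* The CBs form a software doubly linked ring (next/prev) mirrored by
 * a hardware singly linked ring: each cb->link holds the bus address
 * of the following CB, with the last entry wrapping back to the first
 * via the (i + 1) % count arithmetic above. */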
1933
ca93ca42 1934static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1da177e4 1935{
f26251eb
BA
1936 if (!nic->rxs) return;
1937 if (RU_SUSPENDED != nic->ru_running) return;
ca93ca42
JG
1938
1939 /* handle init time starts */
f26251eb 1940 if (!rx) rx = nic->rxs;
ca93ca42
JG
1941
1942 /* (Re)start RU if suspended or idle and RFA is non-NULL */
f26251eb 1943 if (rx->skb) {
ca93ca42
JG
1944 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1945 nic->ru_running = RU_RUNNING;
1946 }
1da177e4
LT
1947}
1948
719cdac5 1949#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
858119e1 1950static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1da177e4 1951{
89d71a66 1952 if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
1da177e4
LT
1953 return -ENOMEM;
1954
89d71a66 1955 /* Init, and map the RFD. */
27d7ff46 1956 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1da177e4
LT
1957 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1958 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1959
8d8bb39b 1960 if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
1f53367d 1961 dev_kfree_skb_any(rx->skb);
097688ef 1962 rx->skb = NULL;
1f53367d
MC
1963 rx->dma_addr = 0;
1964 return -ENOMEM;
1965 }
1966
1da177e4 1967 /* Link the RFD to end of RFA by linking previous RFD to
7734f6e6
DA
1968 * this one. We are safe to touch the previous RFD because
1969 * it is protected by the before last buffer's el bit being set */
aaf918ba 1970 if (rx->prev->skb) {
1da177e4 1971 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
6caf52a4 1972 put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1923815d 1973 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
773c9c1f 1974 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1da177e4
LT
1975 }
1976
1977 return 0;
1978}
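
/* Note the put_unaligned_le32() above: the previous RFD lives inside
 * skb data with no alignment guarantee, and the pci_dma_sync call
 * pushes the patched link field out to the device before the RU can
 * follow it. */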
1979
858119e1 1980static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1da177e4
LT
1981 unsigned int *work_done, unsigned int work_to_do)
1982{
09f75cd7 1983 struct net_device *dev = nic->netdev;
1da177e4
LT
1984 struct sk_buff *skb = rx->skb;
1985 struct rfd *rfd = (struct rfd *)skb->data;
1986 u16 rfd_status, actual_size;
719cdac5 1987 u16 fcs_pad = 0;
1da177e4 1988
f26251eb 1989 if (unlikely(work_done && *work_done >= work_to_do))
1da177e4
LT
1990 return -EAGAIN;
1991
1992 /* Need to sync before taking a peek at cb_complete bit */
1993 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
773c9c1f 1994 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1da177e4
LT
1995 rfd_status = le16_to_cpu(rfd->status);
1996
fa05e1ad
JP
1997 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
1998 "status=0x%04X\n", rfd_status);
c335869f 1999 dma_rmb(); /* read size after status bit */
1da177e4
LT
2000
2001 /* If data isn't ready, nothing to indicate */
7734f6e6
DA
2002 if (unlikely(!(rfd_status & cb_complete))) {
2003 /* If the next buffer has the el bit, but we think the receiver
2004 * is still running, check to see if it really stopped while
2005 * we had interrupts off.
2006 * This allows for a fast restart without re-enabling
2007 * interrupts */
2008 if ((le16_to_cpu(rfd->command) & cb_el) &&
2009 (RU_RUNNING == nic->ru_running))
2010
17393dd6 2011 if (ioread8(&nic->csr->scb.status) & rus_no_res)
7734f6e6 2012 nic->ru_running = RU_SUSPENDED;
303d67c2
KH
2013 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2014 sizeof(struct rfd),
6ff9c2e7 2015 PCI_DMA_FROMDEVICE);
1f53367d 2016 return -ENODATA;
7734f6e6 2017 }
1da177e4
LT
2018
2019 /* Get actual data size */
719cdac5
BG
2020 if (unlikely(dev->features & NETIF_F_RXFCS))
2021 fcs_pad = 4;
1da177e4 2022 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
f26251eb 2023 if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
1da177e4
LT
2024 actual_size = RFD_BUF_LEN - sizeof(struct rfd);
2025
2026 /* Get data */
2027 pci_unmap_single(nic->pdev, rx->dma_addr,
773c9c1f 2028 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1da177e4 2029
7734f6e6
DA
2030 /* If this buffer has the el bit, but we think the receiver
2031 * is still running, check to see if it really stopped while
2032 * we had interrupts off.
2033 * This allows for a fast restart without re-enabling interrupts.
2034 * This can happen when the RU sees the size change but also sees
2035 * the el bit set. */
2036 if ((le16_to_cpu(rfd->command) & cb_el) &&
2037 (RU_RUNNING == nic->ru_running)) {
2038
17393dd6 2039 if (ioread8(&nic->csr->scb.status) & rus_no_res)
ca93ca42 2040 nic->ru_running = RU_SUSPENDED;
7734f6e6 2041 }
ca93ca42 2042
1da177e4
LT
2043 /* Pull off the RFD and put the actual data (minus eth hdr) */
2044 skb_reserve(skb, sizeof(struct rfd));
2045 skb_put(skb, actual_size);
2046 skb->protocol = eth_type_trans(skb, nic->netdev);
2047
0bf61e66
BG
2048 /* If we are receiving all frames, then don't bother
2049 * checking for errors.
2050 */
2051 if (unlikely(dev->features & NETIF_F_RXALL)) {
2052 if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
2053 /* Received oversized frame, but keep it. */
2054 nic->rx_over_length_errors++;
2055 goto process_skb;
2056 }
2057
f26251eb 2058 if (unlikely(!(rfd_status & cb_ok))) {
1da177e4 2059 /* Don't indicate if hardware indicates errors */
1da177e4 2060 dev_kfree_skb_any(skb);
719cdac5 2061 } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
1da177e4
LT
2062 /* Don't indicate oversized frames */
2063 nic->rx_over_length_errors++;
1da177e4
LT
2064 dev_kfree_skb_any(skb);
2065 } else {
0bf61e66 2066process_skb:
09f75cd7 2067 dev->stats.rx_packets++;
719cdac5 2068 dev->stats.rx_bytes += (actual_size - fcs_pad);
1da177e4 2069 netif_receive_skb(skb);
f26251eb 2070 if (work_done)
1da177e4
LT
2071 (*work_done)++;
2072 }
2073
2074 rx->skb = NULL;
2075
2076 return 0;
2077}
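
/* Return value contract: 0 hands the skb to the stack and clears
 * rx->skb so the refill loop in e100_rx_clean() re-arms this slot;
 * -EAGAIN (quota hit) and -ENODATA (not ready) leave the buffer
 * mapped for a later pass. */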
2078
858119e1 2079static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1da177e4
LT
2080 unsigned int work_to_do)
2081{
2082 struct rx *rx;
7734f6e6
DA
2083 int restart_required = 0, err = 0;
2084 struct rx *old_before_last_rx, *new_before_last_rx;
2085 struct rfd *old_before_last_rfd, *new_before_last_rfd;
1da177e4
LT
2086
2087 /* Indicate newly arrived packets */
f26251eb 2088 for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
7734f6e6
DA
2089 err = e100_rx_indicate(nic, rx, work_done, work_to_do);
2090 /* Hit quota or no more to clean */
2091 if (-EAGAIN == err || -ENODATA == err)
ca93ca42 2092 break;
1da177e4
LT
2093 }
2094
7734f6e6
DA
2095
 2096 /* On EAGAIN we hit the quota, so there is more work to do; restart
 2097 * once cleanup is complete.
 2098 * Otherwise, if the RU is already suspended (RNR), a restart is
 2099 * required here. This ensures the state machine never starts with a
 2100 * partially cleaned list, avoiding a race between hardware
 2101 * and rx_to_clean when in NAPI mode */
2102 if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
2103 restart_required = 1;
2104
2105 old_before_last_rx = nic->rx_to_use->prev->prev;
2106 old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
ca93ca42 2107
1da177e4 2108 /* Alloc new skbs to refill list */
f26251eb
BA
2109 for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
2110 if (unlikely(e100_rx_alloc_skb(nic, rx)))
1da177e4
LT
2111 break; /* Better luck next time (see watchdog) */
2112 }
ca93ca42 2113
7734f6e6
DA
2114 new_before_last_rx = nic->rx_to_use->prev->prev;
2115 if (new_before_last_rx != old_before_last_rx) {
2116 /* Set the el-bit on the buffer that is before the last buffer.
2117 * This lets us update the next pointer on the last buffer
2118 * without worrying about hardware touching it.
2119 * We set the size to 0 to prevent hardware from touching this
2120 * buffer.
 2121 * When the hardware hits the before-last buffer with the el-bit
 2122 * and a size of 0, it raises an RNR interrupt and the RU goes into
2123 * the No Resources state. It will not complete nor write to
2124 * this buffer. */
2125 new_before_last_rfd =
2126 (struct rfd *)new_before_last_rx->skb->data;
2127 new_before_last_rfd->size = 0;
2128 new_before_last_rfd->command |= cpu_to_le16(cb_el);
2129 pci_dma_sync_single_for_device(nic->pdev,
2130 new_before_last_rx->dma_addr, sizeof(struct rfd),
773c9c1f 2131 PCI_DMA_BIDIRECTIONAL);
7734f6e6
DA
2132
2133 /* Now that we have a new stopping point, we can clear the old
2134 * stopping point. We must sync twice to get the proper
2135 * ordering on the hardware side of things. */
2136 old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
2137 pci_dma_sync_single_for_device(nic->pdev,
2138 old_before_last_rx->dma_addr, sizeof(struct rfd),
773c9c1f 2139 PCI_DMA_BIDIRECTIONAL);
719cdac5
BG
2140 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
2141 + ETH_FCS_LEN);
7734f6e6
DA
2142 pci_dma_sync_single_for_device(nic->pdev,
2143 old_before_last_rx->dma_addr, sizeof(struct rfd),
773c9c1f 2144 PCI_DMA_BIDIRECTIONAL);
7734f6e6
DA
2145 }
2146
f26251eb 2147 if (restart_required) {
ca93ca42 2148 /* Ack the RNR interrupt before restarting the receiver */
915e91d7 2149 iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
7734f6e6 2150 e100_start_receiver(nic, nic->rx_to_clean);
f26251eb 2151 if (work_done)
ca93ca42
JG
2152 (*work_done)++;
2153 }
1da177e4
LT
2154}
2155
2156static void e100_rx_clean_list(struct nic *nic)
2157{
2158 struct rx *rx;
2159 unsigned int i, count = nic->params.rfds.count;
2160
ca93ca42
JG
2161 nic->ru_running = RU_UNINITIALIZED;
2162
f26251eb
BA
2163 if (nic->rxs) {
2164 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2165 if (rx->skb) {
1da177e4 2166 pci_unmap_single(nic->pdev, rx->dma_addr,
773c9c1f 2167 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1da177e4
LT
2168 dev_kfree_skb(rx->skb);
2169 }
2170 }
2171 kfree(nic->rxs);
2172 nic->rxs = NULL;
2173 }
2174
2175 nic->rx_to_use = nic->rx_to_clean = NULL;
1da177e4
LT
2176}
2177
2178static int e100_rx_alloc_list(struct nic *nic)
2179{
2180 struct rx *rx;
2181 unsigned int i, count = nic->params.rfds.count;
7734f6e6 2182 struct rfd *before_last;
1da177e4
LT
2183
2184 nic->rx_to_use = nic->rx_to_clean = NULL;
ca93ca42 2185 nic->ru_running = RU_UNINITIALIZED;
1da177e4 2186
f26251eb 2187 if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
1da177e4 2188 return -ENOMEM;
1da177e4 2189
f26251eb 2190 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
1da177e4
LT
2191 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
2192 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
f26251eb 2193 if (e100_rx_alloc_skb(nic, rx)) {
1da177e4
LT
2194 e100_rx_clean_list(nic);
2195 return -ENOMEM;
2196 }
2197 }
7734f6e6
DA
2198 /* Set the el-bit on the buffer that is before the last buffer.
2199 * This lets us update the next pointer on the last buffer without
2200 * worrying about hardware touching it.
2201 * We set the size to 0 to prevent hardware from touching this buffer.
2202 * When the hardware hits the before last buffer with el-bit and size
2203 * of 0, it will RNR interrupt, the RU will go into the No Resources
2204 * state. It will not complete nor write to this buffer. */
2205 rx = nic->rxs->prev->prev;
2206 before_last = (struct rfd *)rx->skb->data;
2207 before_last->command |= cpu_to_le16(cb_el);
2208 before_last->size = 0;
2209 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
773c9c1f 2210 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1da177e4
LT
2211
2212 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
ca93ca42 2213 nic->ru_running = RU_SUSPENDED;
1da177e4
LT
2214
2215 return 0;
2216}
2217
7d12e780 2218static irqreturn_t e100_intr(int irq, void *dev_id)
1da177e4
LT
2219{
2220 struct net_device *netdev = dev_id;
2221 struct nic *nic = netdev_priv(netdev);
27345bb6 2222 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
1da177e4 2223
fa05e1ad
JP
2224 netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
2225 "stat_ack = 0x%02X\n", stat_ack);
1da177e4 2226
f26251eb 2227 if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
1da177e4
LT
2228 stat_ack == stat_ack_not_present) /* Hardware is ejected */
2229 return IRQ_NONE;
2230
2231 /* Ack interrupt(s) */
27345bb6 2232 iowrite8(stat_ack, &nic->csr->scb.stat_ack);
1da177e4 2233
ca93ca42 2234 /* We hit Receive No Resource (RNR); restart RU after cleaning */
f26251eb 2235 if (stat_ack & stat_ack_rnr)
ca93ca42
JG
2236 nic->ru_running = RU_SUSPENDED;
2237
288379f0 2238 if (likely(napi_schedule_prep(&nic->napi))) {
0685c31b 2239 e100_disable_irq(nic);
288379f0 2240 __napi_schedule(&nic->napi);
0685c31b 2241 }
1da177e4
LT
2242
2243 return IRQ_HANDLED;
2244}
2245
bea3348e 2246static int e100_poll(struct napi_struct *napi, int budget)
1da177e4 2247{
bea3348e 2248 struct nic *nic = container_of(napi, struct nic, napi);
ddfce6bb 2249 unsigned int work_done = 0;
1da177e4 2250
bea3348e 2251 e100_rx_clean(nic, &work_done, budget);
53e52c72 2252 e100_tx_clean(nic);
1da177e4 2253
53e52c72
DM
 2254 /* If the budget was not fully consumed, exit polling mode */
2255 if (work_done < budget) {
288379f0 2256 napi_complete(napi);
1da177e4 2257 e100_enable_irq(nic);
1da177e4
LT
2258 }
2259
bea3348e 2260 return work_done;
1da177e4
LT
2261}
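
/* Standard NAPI contract: Rx work is bounded by the budget while Tx
 * cleanup is unbounded; interrupts stay off for as long as poll keeps
 * consuming its full budget and are re-armed only after
 * napi_complete(). */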
2262
2263#ifdef CONFIG_NET_POLL_CONTROLLER
2264static void e100_netpoll(struct net_device *netdev)
2265{
2266 struct nic *nic = netdev_priv(netdev);
611494dc 2267
1da177e4 2268 e100_disable_irq(nic);
7d12e780 2269 e100_intr(nic->pdev->irq, netdev);
1da177e4
LT
2270 e100_tx_clean(nic);
2271 e100_enable_irq(nic);
2272}
2273#endif
2274
1da177e4
LT
2275static int e100_set_mac_address(struct net_device *netdev, void *p)
2276{
2277 struct nic *nic = netdev_priv(netdev);
2278 struct sockaddr *addr = p;
2279
2280 if (!is_valid_ether_addr(addr->sa_data))
2281 return -EADDRNOTAVAIL;
2282
2283 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2284 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2285
2286 return 0;
2287}
2288
2289static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2290{
f26251eb 2291 if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
1da177e4
LT
2292 return -EINVAL;
2293 netdev->mtu = new_mtu;
2294 return 0;
2295}
2296
2297static int e100_asf(struct nic *nic)
2298{
2299 /* ASF can be enabled from eeprom */
807540ba 2300 return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
1da177e4
LT
2301 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2302 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
807540ba 2303 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
1da177e4
LT
2304}
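
/* ASF (Alert Standard Format) is treated as enabled only for the
 * 0x1050-0x1057 device IDs, and only when the EEPROM sets the ASF bit
 * without GCL and has a programmed SMBus address (!= 0xFE). */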
2305
2306static int e100_up(struct nic *nic)
2307{
2308 int err;
2309
f26251eb 2310 if ((err = e100_rx_alloc_list(nic)))
1da177e4 2311 return err;
f26251eb 2312 if ((err = e100_alloc_cbs(nic)))
1da177e4 2313 goto err_rx_clean_list;
f26251eb 2314 if ((err = e100_hw_init(nic)))
1da177e4
LT
2315 goto err_clean_cbs;
2316 e100_set_multicast_list(nic->netdev);
ca93ca42 2317 e100_start_receiver(nic, NULL);
1da177e4 2318 mod_timer(&nic->watchdog, jiffies);
f26251eb 2319 if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
1da177e4
LT
2320 nic->netdev->name, nic->netdev)))
2321 goto err_no_irq;
1da177e4 2322 netif_wake_queue(nic->netdev);
bea3348e 2323 napi_enable(&nic->napi);
0236ebb7
MC
2324 /* enable ints _after_ enabling poll, preventing a race between
2325 * disable ints+schedule */
2326 e100_enable_irq(nic);
1da177e4
LT
2327 return 0;
2328
2329err_no_irq:
2330 del_timer_sync(&nic->watchdog);
2331err_clean_cbs:
2332 e100_clean_cbs(nic);
2333err_rx_clean_list:
2334 e100_rx_clean_list(nic);
2335 return err;
2336}
2337
2338static void e100_down(struct nic *nic)
2339{
0236ebb7 2340 /* wait here for poll to complete */
bea3348e 2341 napi_disable(&nic->napi);
0236ebb7 2342 netif_stop_queue(nic->netdev);
1da177e4
LT
2343 e100_hw_reset(nic);
2344 free_irq(nic->pdev->irq, nic->netdev);
2345 del_timer_sync(&nic->watchdog);
2346 netif_carrier_off(nic->netdev);
1da177e4
LT
2347 e100_clean_cbs(nic);
2348 e100_rx_clean_list(nic);
2349}
2350
2351static void e100_tx_timeout(struct net_device *netdev)
2352{
2353 struct nic *nic = netdev_priv(netdev);
2354
05479938 2355 /* Reset outside of interrupt context, to avoid request_irq
2acdb1e0
MC
2356 * in interrupt context */
2357 schedule_work(&nic->tx_timeout_task);
2358}
2359
c4028958 2360static void e100_tx_timeout_task(struct work_struct *work)
2acdb1e0 2361{
c4028958
DH
2362 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2363 struct net_device *netdev = nic->netdev;
2acdb1e0 2364
fa05e1ad
JP
2365 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
2366 "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
401da6ae
AC
2367
2368 rtnl_lock();
2369 if (netif_running(netdev)) {
2370 e100_down(netdev_priv(netdev));
2371 e100_up(netdev_priv(netdev));
2372 }
2373 rtnl_unlock();
1da177e4
LT
2374}
2375
2376static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2377{
2378 int err;
2379 struct sk_buff *skb;
2380
2381 /* Use driver resources to perform internal MAC or PHY
2382 * loopback test. A single packet is prepared and transmitted
2383 * in loopback mode, and the test passes if the received
2384 * packet compares byte-for-byte to the transmitted packet. */
2385
f26251eb 2386 if ((err = e100_rx_alloc_list(nic)))
1da177e4 2387 return err;
f26251eb 2388 if ((err = e100_alloc_cbs(nic)))
1da177e4
LT
2389 goto err_clean_rx;
2390
2391 /* ICH PHY loopback is broken so do MAC loopback instead */
f26251eb 2392 if (nic->flags & ich && loopback_mode == lb_phy)
1da177e4
LT
2393 loopback_mode = lb_mac;
2394
2395 nic->loopback = loopback_mode;
f26251eb 2396 if ((err = e100_hw_init(nic)))
1da177e4
LT
2397 goto err_loopback_none;
2398
f26251eb 2399 if (loopback_mode == lb_phy)
1da177e4
LT
2400 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2401 BMCR_LOOPBACK);
2402
ca93ca42 2403 e100_start_receiver(nic, NULL);
1da177e4 2404
f26251eb 2405 if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
1da177e4
LT
2406 err = -ENOMEM;
2407 goto err_loopback_none;
2408 }
2409 skb_put(skb, ETH_DATA_LEN);
2410 memset(skb->data, 0xFF, ETH_DATA_LEN);
2411 e100_xmit_frame(skb, nic->netdev);
2412
2413 msleep(10);
2414
aa49cdd9 2415 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
773c9c1f 2416 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
aa49cdd9 2417
f26251eb 2418 if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
1da177e4
LT
2419 skb->data, ETH_DATA_LEN))
2420 err = -EAGAIN;
2421
2422err_loopback_none:
2423 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2424 nic->loopback = lb_none;
1da177e4 2425 e100_clean_cbs(nic);
aa49cdd9 2426 e100_hw_reset(nic);
1da177e4
LT
2427err_clean_rx:
2428 e100_rx_clean_list(nic);
2429 return err;
2430}
2431
2432#define MII_LED_CONTROL 0x1B
b55de80e
BA
2433#define E100_82552_LED_OVERRIDE 0x19
2434#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
2435#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
1da177e4
LT
2436
2437static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2438{
2439 struct nic *nic = netdev_priv(netdev);
2440 return mii_ethtool_gset(&nic->mii, cmd);
2441}
2442
2443static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2444{
2445 struct nic *nic = netdev_priv(netdev);
2446 int err;
2447
2448 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2449 err = mii_ethtool_sset(&nic->mii, cmd);
2450 e100_exec_cb(nic, NULL, e100_configure);
2451
2452 return err;
2453}
2454
2455static void e100_get_drvinfo(struct net_device *netdev,
2456 struct ethtool_drvinfo *info)
2457{
2458 struct nic *nic = netdev_priv(netdev);
23020ab3
RJ
2459 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2460 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
23020ab3
RJ
2461 strlcpy(info->bus_info, pci_name(nic->pdev),
2462 sizeof(info->bus_info));
1da177e4
LT
2463}
2464
abf9b902 2465#define E100_PHY_REGS 0x1C
1da177e4
LT
2466static int e100_get_regs_len(struct net_device *netdev)
2467{
2468 struct nic *nic = netdev_priv(netdev);
abf9b902 2469 return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
1da177e4
LT
2470}
2471
2472static void e100_get_regs(struct net_device *netdev,
2473 struct ethtool_regs *regs, void *p)
2474{
2475 struct nic *nic = netdev_priv(netdev);
2476 u32 *buff = p;
2477 int i;
2478
44c10138 2479 regs->version = (1 << 24) | nic->pdev->revision;
27345bb6
JB
2480 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2481 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2482 ioread16(&nic->csr->scb.status);
f26251eb 2483 for (i = E100_PHY_REGS; i >= 0; i--)
1da177e4
LT
2484 buff[1 + E100_PHY_REGS - i] =
2485 mdio_read(netdev, nic->mii.phy_id, i);
2486 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2487 e100_exec_cb(nic, NULL, e100_dump);
2488 msleep(10);
2489 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2490 sizeof(nic->mem->dump_buf));
2491}
2492
2493static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2494{
2495 struct nic *nic = netdev_priv(netdev);
2496 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2497 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2498}
2499
2500static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2501{
2502 struct nic *nic = netdev_priv(netdev);
2503
bc79fc84
RW
2504 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2505 !device_can_wakeup(&nic->pdev->dev))
1da177e4
LT
2506 return -EOPNOTSUPP;
2507
f26251eb 2508 if (wol->wolopts)
1da177e4
LT
2509 nic->flags |= wol_magic;
2510 else
2511 nic->flags &= ~wol_magic;
2512
bc79fc84
RW
2513 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2514
1da177e4
LT
2515 e100_exec_cb(nic, NULL, e100_configure);
2516
2517 return 0;
2518}
2519
2520static u32 e100_get_msglevel(struct net_device *netdev)
2521{
2522 struct nic *nic = netdev_priv(netdev);
2523 return nic->msg_enable;
2524}
2525
2526static void e100_set_msglevel(struct net_device *netdev, u32 value)
2527{
2528 struct nic *nic = netdev_priv(netdev);
2529 nic->msg_enable = value;
2530}
2531
2532static int e100_nway_reset(struct net_device *netdev)
2533{
2534 struct nic *nic = netdev_priv(netdev);
2535 return mii_nway_restart(&nic->mii);
2536}
2537
2538static u32 e100_get_link(struct net_device *netdev)
2539{
2540 struct nic *nic = netdev_priv(netdev);
2541 return mii_link_ok(&nic->mii);
2542}
2543
2544static int e100_get_eeprom_len(struct net_device *netdev)
2545{
2546 struct nic *nic = netdev_priv(netdev);
2547 return nic->eeprom_wc << 1;
2548}
2549
2550#define E100_EEPROM_MAGIC 0x1234
2551static int e100_get_eeprom(struct net_device *netdev,
2552 struct ethtool_eeprom *eeprom, u8 *bytes)
2553{
2554 struct nic *nic = netdev_priv(netdev);
2555
2556 eeprom->magic = E100_EEPROM_MAGIC;
2557 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2558
2559 return 0;
2560}
2561
2562static int e100_set_eeprom(struct net_device *netdev,
2563 struct ethtool_eeprom *eeprom, u8 *bytes)
2564{
2565 struct nic *nic = netdev_priv(netdev);
2566
f26251eb 2567 if (eeprom->magic != E100_EEPROM_MAGIC)
1da177e4
LT
2568 return -EINVAL;
2569
2570 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2571
2572 return e100_eeprom_save(nic, eeprom->offset >> 1,
2573 (eeprom->len >> 1) + 1);
2574}
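
/* The EEPROM is word (16-bit) addressed, so the byte offset and
 * length from ethtool are halved; the "+ 1" ensures a write ending on
 * an odd byte still flushes its final word. */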
2575
2576static void e100_get_ringparam(struct net_device *netdev,
2577 struct ethtool_ringparam *ring)
2578{
2579 struct nic *nic = netdev_priv(netdev);
2580 struct param_range *rfds = &nic->params.rfds;
2581 struct param_range *cbs = &nic->params.cbs;
2582
2583 ring->rx_max_pending = rfds->max;
2584 ring->tx_max_pending = cbs->max;
1da177e4
LT
2585 ring->rx_pending = rfds->count;
2586 ring->tx_pending = cbs->count;
1da177e4
LT
2587}
2588
2589static int e100_set_ringparam(struct net_device *netdev,
2590 struct ethtool_ringparam *ring)
2591{
2592 struct nic *nic = netdev_priv(netdev);
2593 struct param_range *rfds = &nic->params.rfds;
2594 struct param_range *cbs = &nic->params.cbs;
2595
05479938 2596 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1da177e4
LT
2597 return -EINVAL;
2598
f26251eb 2599 if (netif_running(netdev))
1da177e4
LT
2600 e100_down(nic);
2601 rfds->count = max(ring->rx_pending, rfds->min);
2602 rfds->count = min(rfds->count, rfds->max);
2603 cbs->count = max(ring->tx_pending, cbs->min);
2604 cbs->count = min(cbs->count, cbs->max);
fa05e1ad
JP
2605 netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
2606 rfds->count, cbs->count);
f26251eb 2607 if (netif_running(netdev))
1da177e4
LT
2608 e100_up(nic);
2609
2610 return 0;
2611}
2612
2613static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2614 "Link test (on/offline)",
2615 "Eeprom test (on/offline)",
2616 "Self test (offline)",
2617 "Mac loopback (offline)",
2618 "Phy loopback (offline)",
2619};
4c3616cd 2620#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
1da177e4 2621
1da177e4
LT
2622static void e100_diag_test(struct net_device *netdev,
2623 struct ethtool_test *test, u64 *data)
2624{
2625 struct ethtool_cmd cmd;
2626 struct nic *nic = netdev_priv(netdev);
2627 int i, err;
2628
2629 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2630 data[0] = !mii_link_ok(&nic->mii);
2631 data[1] = e100_eeprom_load(nic);
f26251eb 2632 if (test->flags & ETH_TEST_FL_OFFLINE) {
1da177e4
LT
2633
2634 /* save speed, duplex & autoneg settings */
2635 err = mii_ethtool_gset(&nic->mii, &cmd);
2636
f26251eb 2637 if (netif_running(netdev))
1da177e4
LT
2638 e100_down(nic);
2639 data[2] = e100_self_test(nic);
2640 data[3] = e100_loopback_test(nic, lb_mac);
2641 data[4] = e100_loopback_test(nic, lb_phy);
2642
2643 /* restore speed, duplex & autoneg settings */
2644 err = mii_ethtool_sset(&nic->mii, &cmd);
2645
f26251eb 2646 if (netif_running(netdev))
1da177e4
LT
2647 e100_up(nic);
2648 }
f26251eb 2649 for (i = 0; i < E100_TEST_LEN; i++)
1da177e4 2650 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
a074fb86
MC
2651
2652 msleep_interruptible(4 * 1000);
1da177e4
LT
2653}
2654
a70b86ae
JK
2655static int e100_set_phys_id(struct net_device *netdev,
2656 enum ethtool_phys_id_state state)
1da177e4
LT
2657{
2658 struct nic *nic = netdev_priv(netdev);
a70b86ae
JK
2659 enum led_state {
2660 led_on = 0x01,
2661 led_off = 0x04,
2662 led_on_559 = 0x05,
2663 led_on_557 = 0x07,
2664 };
b55de80e 2665 u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
a70b86ae
JK
2666 MII_LED_CONTROL;
2667 u16 leds = 0;
2668
2669 switch (state) {
2670 case ETHTOOL_ID_ACTIVE:
2671 return 2;
1da177e4 2672
a70b86ae
JK
2673 case ETHTOOL_ID_ON:
2674 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
2675 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2676 break;
2677
2678 case ETHTOOL_ID_OFF:
2679 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
2680 break;
2681
2682 case ETHTOOL_ID_INACTIVE:
2683 break;
2684 }
1da177e4 2685
a70b86ae 2686 mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
1da177e4
LT
2687 return 0;
2688}
2689
2690static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2691 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2692 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2693 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2694 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2695 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2696 "tx_heartbeat_errors", "tx_window_errors",
2697 /* device-specific stats */
2698 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2699 "tx_flow_control_pause", "rx_flow_control_pause",
2700 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
6f66342c 2701 "rx_short_frame_errors", "rx_over_length_errors",
1da177e4
LT
2702};
2703#define E100_NET_STATS_LEN 21
4c3616cd 2704#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
1da177e4 2705
b9f2c044 2706static int e100_get_sset_count(struct net_device *netdev, int sset)
1da177e4 2707{
b9f2c044
JG
2708 switch (sset) {
2709 case ETH_SS_TEST:
2710 return E100_TEST_LEN;
2711 case ETH_SS_STATS:
2712 return E100_STATS_LEN;
2713 default:
2714 return -EOPNOTSUPP;
2715 }
1da177e4
LT
2716}
2717
2718static void e100_get_ethtool_stats(struct net_device *netdev,
2719 struct ethtool_stats *stats, u64 *data)
2720{
2721 struct nic *nic = netdev_priv(netdev);
2722 int i;
2723
f26251eb 2724 for (i = 0; i < E100_NET_STATS_LEN; i++)
09f75cd7 2725 data[i] = ((unsigned long *)&netdev->stats)[i];
1da177e4
LT
2726
2727 data[i++] = nic->tx_deferred;
2728 data[i++] = nic->tx_single_collisions;
2729 data[i++] = nic->tx_multiple_collisions;
2730 data[i++] = nic->tx_fc_pause;
2731 data[i++] = nic->rx_fc_pause;
2732 data[i++] = nic->rx_fc_unsupported;
2733 data[i++] = nic->tx_tco_frames;
2734 data[i++] = nic->rx_tco_frames;
6f66342c
BG
2735 data[i++] = nic->rx_short_frame_errors;
2736 data[i++] = nic->rx_over_length_errors;
1da177e4
LT
2737}
2738
2739static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2740{
f26251eb 2741 switch (stringset) {
1da177e4
LT
2742 case ETH_SS_TEST:
2743 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2744 break;
2745 case ETH_SS_STATS:
2746 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2747 break;
2748 }
2749}
2750
7282d491 2751static const struct ethtool_ops e100_ethtool_ops = {
1da177e4
LT
2752 .get_settings = e100_get_settings,
2753 .set_settings = e100_set_settings,
2754 .get_drvinfo = e100_get_drvinfo,
2755 .get_regs_len = e100_get_regs_len,
2756 .get_regs = e100_get_regs,
2757 .get_wol = e100_get_wol,
2758 .set_wol = e100_set_wol,
2759 .get_msglevel = e100_get_msglevel,
2760 .set_msglevel = e100_set_msglevel,
2761 .nway_reset = e100_nway_reset,
2762 .get_link = e100_get_link,
2763 .get_eeprom_len = e100_get_eeprom_len,
2764 .get_eeprom = e100_get_eeprom,
2765 .set_eeprom = e100_set_eeprom,
2766 .get_ringparam = e100_get_ringparam,
2767 .set_ringparam = e100_set_ringparam,
1da177e4
LT
2768 .self_test = e100_diag_test,
2769 .get_strings = e100_get_strings,
a70b86ae 2770 .set_phys_id = e100_set_phys_id,
1da177e4 2771 .get_ethtool_stats = e100_get_ethtool_stats,
b9f2c044 2772 .get_sset_count = e100_get_sset_count,
abe0c5d1 2773 .get_ts_info = ethtool_op_get_ts_info,
1da177e4
LT
2774};
2775
2776static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2777{
2778 struct nic *nic = netdev_priv(netdev);
2779
2780 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2781}
2782
2783static int e100_alloc(struct nic *nic)
2784{
2785 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2786 &nic->dma_addr);
2787 return nic->mem ? 0 : -ENOMEM;
2788}
2789
2790static void e100_free(struct nic *nic)
2791{
f26251eb 2792 if (nic->mem) {
1da177e4
LT
2793 pci_free_consistent(nic->pdev, sizeof(struct mem),
2794 nic->mem, nic->dma_addr);
2795 nic->mem = NULL;
2796 }
2797}
2798
2799static int e100_open(struct net_device *netdev)
2800{
2801 struct nic *nic = netdev_priv(netdev);
2802 int err = 0;
2803
2804 netif_carrier_off(netdev);
f26251eb 2805 if ((err = e100_up(nic)))
fa05e1ad 2806 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
1da177e4
LT
2807 return err;
2808}
2809
2810static int e100_close(struct net_device *netdev)
2811{
2812 e100_down(netdev_priv(netdev));
2813 return 0;
2814}
2815
719cdac5
BG
2816static int e100_set_features(struct net_device *netdev,
2817 netdev_features_t features)
2818{
2819 struct nic *nic = netdev_priv(netdev);
2820 netdev_features_t changed = features ^ netdev->features;
2821
0bf61e66 2822 if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
719cdac5
BG
2823 return 0;
2824
2825 netdev->features = features;
2826 e100_exec_cb(nic, NULL, e100_configure);
2827 return 0;
2828}
2829
acc78426
SH
2830static const struct net_device_ops e100_netdev_ops = {
2831 .ndo_open = e100_open,
2832 .ndo_stop = e100_close,
00829823 2833 .ndo_start_xmit = e100_xmit_frame,
acc78426 2834 .ndo_validate_addr = eth_validate_addr,
afc4b13d 2835 .ndo_set_rx_mode = e100_set_multicast_list,
acc78426
SH
2836 .ndo_set_mac_address = e100_set_mac_address,
2837 .ndo_change_mtu = e100_change_mtu,
2838 .ndo_do_ioctl = e100_do_ioctl,
2839 .ndo_tx_timeout = e100_tx_timeout,
2840#ifdef CONFIG_NET_POLL_CONTROLLER
2841 .ndo_poll_controller = e100_netpoll,
2842#endif
719cdac5 2843 .ndo_set_features = e100_set_features,
acc78426
SH
2844};
2845
1dd06ae8 2846static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1da177e4
LT
2847{
2848 struct net_device *netdev;
2849 struct nic *nic;
2850 int err;
2851
41de8d4c 2852 if (!(netdev = alloc_etherdev(sizeof(struct nic))))
1da177e4 2853 return -ENOMEM;
1da177e4 2854
719cdac5 2855 netdev->hw_features |= NETIF_F_RXFCS;
75f58a53 2856 netdev->priv_flags |= IFF_SUPP_NOFCS;
0bf61e66 2857 netdev->hw_features |= NETIF_F_RXALL;
719cdac5 2858
acc78426 2859 netdev->netdev_ops = &e100_netdev_ops;
7ad24ea4 2860 netdev->ethtool_ops = &e100_ethtool_ops;
1da177e4 2861 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
0eb5a34c 2862 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1da177e4
LT
2863
2864 nic = netdev_priv(netdev);
bea3348e 2865 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
1da177e4
LT
2866 nic->netdev = netdev;
2867 nic->pdev = pdev;
2868 nic->msg_enable = (1 << debug) - 1;
72001762 2869 nic->mdio_ctrl = mdio_ctrl_hw;
1da177e4
LT
2870 pci_set_drvdata(pdev, netdev);
2871
f26251eb 2872 if ((err = pci_enable_device(pdev))) {
fa05e1ad 2873 netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
1da177e4
LT
2874 goto err_out_free_dev;
2875 }
2876
f26251eb 2877 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
fa05e1ad 2878 netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
1da177e4
LT
2879 err = -ENODEV;
2880 goto err_out_disable_pdev;
2881 }
2882
f26251eb 2883 if ((err = pci_request_regions(pdev, DRV_NAME))) {
fa05e1ad 2884 netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
1da177e4
LT
2885 goto err_out_disable_pdev;
2886 }
2887
284901a9 2888 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
fa05e1ad 2889 netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
1da177e4
LT
2890 goto err_out_free_res;
2891 }
2892
1da177e4
LT
2893 SET_NETDEV_DEV(netdev, &pdev->dev);
2894
27345bb6 2895 if (use_io)
fa05e1ad 2896 netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
27345bb6
JB
2897
2898 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
f26251eb 2899 if (!nic->csr) {
fa05e1ad 2900 netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
1da177e4
LT
2901 err = -ENOMEM;
2902 goto err_out_free_res;
2903 }
2904
f26251eb 2905 if (ent->driver_data)
1da177e4
LT
2906 nic->flags |= ich;
2907 else
2908 nic->flags &= ~ich;
2909
2910 e100_get_defaults(nic);
2911
243559f4
JB
2912 /* D100 MAC doesn't allow rx of vlan packets with normal MTU */
2913 if (nic->mac < mac_82558_D101_A4)
2914 netdev->features |= NETIF_F_VLAN_CHALLENGED;
2915
1f53367d 2916 /* locks must be initialized before calling hw_reset */
1da177e4
LT
2917 spin_lock_init(&nic->cb_lock);
2918 spin_lock_init(&nic->cmd_lock);
ac7c6669 2919 spin_lock_init(&nic->mdio_lock);
1da177e4
LT
2920
2921 /* Reset the device before pci_set_master() in case device is in some
2922 * funky state and has an interrupt pending - hint: we don't have the
2923 * interrupt handler registered yet. */
2924 e100_hw_reset(nic);
2925
2926 pci_set_master(pdev);
2927
f16e9d86 2928 setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);
1da177e4 2929
c4028958 2930 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2acdb1e0 2931
f26251eb 2932 if ((err = e100_alloc(nic))) {
fa05e1ad 2933 netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
1da177e4
LT
2934 goto err_out_iounmap;
2935 }
2936
f26251eb 2937 if ((err = e100_eeprom_load(nic)))
1da177e4
LT
2938 goto err_out_free;
2939
f92d8728
MC
2940 e100_phy_init(nic);
2941
1da177e4 2942 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
aaeb6cdf 2943 if (!is_valid_ether_addr(netdev->dev_addr)) {
948cd43f 2944 if (!eeprom_bad_csum_allow) {
fa05e1ad 2945 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
948cd43f
JB
2946 err = -EAGAIN;
2947 goto err_out_free;
2948 } else {
fa05e1ad 2949 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
948cd43f 2950 }
1da177e4
LT
2951 }
2952
2953 /* Wol magic packet can be enabled from eeprom */
f26251eb 2954 if ((nic->mac >= mac_82558_D101_A4) &&
bc79fc84 2955 (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
1da177e4 2956 nic->flags |= wol_magic;
bc79fc84
RW
2957 device_set_wakeup_enable(&pdev->dev, true);
2958 }
1da177e4 2959
6bdacb1a 2960 /* ack any pending wake events, disable PME */
e7272403 2961 pci_pme_active(pdev, false);
1da177e4
LT
2962
2963 strcpy(netdev->name, "eth%d");
f26251eb 2964 if ((err = register_netdev(netdev))) {
fa05e1ad 2965 netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
1da177e4
LT
2966 goto err_out_free;
2967 }
98468efd
RO
2968 nic->cbs_pool = pci_pool_create(netdev->name,
2969 nic->pdev,
211a0d94 2970 nic->params.cbs.max * sizeof(struct cb),
98468efd
RO
2971 sizeof(u32),
2972 0);
9ad607b4
JJB
2973 if (!nic->cbs_pool) {
2974 netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
2975 err = -ENOMEM;
2976 goto err_out_pool;
2977 }
fa05e1ad
JP
2978 netif_info(nic, probe, nic->netdev,
2979 "addr 0x%llx, irq %d, MAC addr %pM\n",
2980 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2981 pdev->irq, netdev->dev_addr);
1da177e4
LT
2982
2983 return 0;
2984
9ad607b4
JJB
2985err_out_pool:
2986 unregister_netdev(netdev);
1da177e4
LT
2987err_out_free:
2988 e100_free(nic);
2989err_out_iounmap:
27345bb6 2990 pci_iounmap(pdev, nic->csr);
1da177e4
LT
2991err_out_free_res:
2992 pci_release_regions(pdev);
2993err_out_disable_pdev:
2994 pci_disable_device(pdev);
2995err_out_free_dev:
1da177e4
LT
2996 free_netdev(netdev);
2997 return err;
2998}
2999
9f9a12f8 3000static void e100_remove(struct pci_dev *pdev)
1da177e4
LT
3001{
3002 struct net_device *netdev = pci_get_drvdata(pdev);
3003
f26251eb 3004 if (netdev) {
1da177e4
LT
3005 struct nic *nic = netdev_priv(netdev);
3006 unregister_netdev(netdev);
3007 e100_free(nic);
915e91d7 3008 pci_iounmap(pdev, nic->csr);
98468efd 3009 pci_pool_destroy(nic->cbs_pool);
1da177e4
LT
3010 free_netdev(netdev);
3011 pci_release_regions(pdev);
3012 pci_disable_device(pdev);
1da177e4
LT
3013 }
3014}
3015
b55de80e
BA
3016#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
3017#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
3018#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
ac7c992c 3019static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
1da177e4
LT
3020{
3021 struct net_device *netdev = pci_get_drvdata(pdev);
3022 struct nic *nic = netdev_priv(netdev);
3023
824545e7 3024 if (netif_running(netdev))
f902283b 3025 e100_down(nic);
518d8338 3026 netif_device_detach(netdev);
a53a33da 3027
1da177e4 3028 pci_save_state(pdev);
e8e82b76
AK
3029
3030 if ((nic->flags & wol_magic) | e100_asf(nic)) {
b55de80e
BA
3031 /* enable reverse auto-negotiation */
3032 if (nic->phy == phy_82552_v) {
3033 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
3034 E100_82552_SMARTSPEED);
3035
3036 mdio_write(netdev, nic->mii.phy_id,
3037 E100_82552_SMARTSPEED, smartspeed |
3038 E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
3039 }
ac7c992c 3040 *enable_wake = true;
e8e82b76 3041 } else {
ac7c992c 3042 *enable_wake = false;
e8e82b76 3043 }
975b366a 3044
2b6e0ca1 3045 pci_clear_master(pdev);
ac7c992c 3046}
1da177e4 3047
ac7c992c
TLSC
3048static int __e100_power_off(struct pci_dev *pdev, bool wake)
3049{
6905b1f1 3050 if (wake)
ac7c992c 3051 return pci_prepare_to_sleep(pdev);
6905b1f1
RW
3052
3053 pci_wake_from_d3(pdev, false);
3054 pci_set_power_state(pdev, PCI_D3hot);
3055
3056 return 0;
1da177e4
LT
3057}
3058
f902283b 3059#ifdef CONFIG_PM
ac7c992c
TLSC
3060static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
3061{
3062 bool wake;
3063 __e100_shutdown(pdev, &wake);
3064 return __e100_power_off(pdev, wake);
3065}
3066
1da177e4
LT
3067static int e100_resume(struct pci_dev *pdev)
3068{
3069 struct net_device *netdev = pci_get_drvdata(pdev);
3070 struct nic *nic = netdev_priv(netdev);
3071
975b366a 3072 pci_set_power_state(pdev, PCI_D0);
1da177e4 3073 pci_restore_state(pdev);
6bdacb1a 3074 /* ack any pending wake events, disable PME */
1ca01512 3075 pci_enable_wake(pdev, PCI_D0, 0);
1da177e4 3076
4b512d26 3077 /* disable reverse auto-negotiation */
b55de80e
BA
3078 if (nic->phy == phy_82552_v) {
3079 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
3080 E100_82552_SMARTSPEED);
3081
3082 mdio_write(netdev, nic->mii.phy_id,
3083 E100_82552_SMARTSPEED,
3084 smartspeed & ~(E100_82552_REV_ANEG));
3085 }
3086
1da177e4 3087 netif_device_attach(netdev);
975b366a 3088 if (netif_running(netdev))
1da177e4
LT
3089 e100_up(nic);
3090
3091 return 0;
3092}
975b366a 3093#endif /* CONFIG_PM */
1da177e4 3094
d18c3db5 3095static void e100_shutdown(struct pci_dev *pdev)
6bdacb1a 3096{
ac7c992c
TLSC
3097 bool wake;
3098 __e100_shutdown(pdev, &wake);
3099 if (system_state == SYSTEM_POWER_OFF)
3100 __e100_power_off(pdev, wake);
6bdacb1a
MC
3101}
3102
2cc30492
AK
3103/* ------------------ PCI Error Recovery infrastructure -------------- */
3104/**
3105 * e100_io_error_detected - called when PCI error is detected.
3106 * @pdev: Pointer to PCI device
0a0863af 3107 * @state: The current pci connection state
2cc30492
AK
3108 */
3109static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3110{
3111 struct net_device *netdev = pci_get_drvdata(pdev);
bea3348e 3112 struct nic *nic = netdev_priv(netdev);
2cc30492 3113
2cc30492 3114 netif_device_detach(netdev);
ef681ce1
AD
3115
3116 if (state == pci_channel_io_perm_failure)
3117 return PCI_ERS_RESULT_DISCONNECT;
3118
3119 if (netif_running(netdev))
3120 e100_down(nic);
b1d26f24 3121 pci_disable_device(pdev);
2cc30492
AK
3122
3123 /* Request a slot reset. */
3124 return PCI_ERS_RESULT_NEED_RESET;
3125}
3126
3127/**
3128 * e100_io_slot_reset - called after the pci bus has been reset.
3129 * @pdev: Pointer to PCI device
3130 *
3131 * Restart the card from scratch.
3132 */
3133static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
3134{
3135 struct net_device *netdev = pci_get_drvdata(pdev);
3136 struct nic *nic = netdev_priv(netdev);
3137
3138 if (pci_enable_device(pdev)) {
fa05e1ad 3139 pr_err("Cannot re-enable PCI device after reset\n");
2cc30492
AK
3140 return PCI_ERS_RESULT_DISCONNECT;
3141 }
3142 pci_set_master(pdev);
3143
3144 /* Only one device per card can do a reset */
3145 if (0 != PCI_FUNC(pdev->devfn))
3146 return PCI_ERS_RESULT_RECOVERED;
3147 e100_hw_reset(nic);
3148 e100_phy_init(nic);
3149
3150 return PCI_ERS_RESULT_RECOVERED;
3151}
3152
3153/**
3154 * e100_io_resume - resume normal operations
3155 * @pdev: Pointer to PCI device
3156 *
3157 * Resume normal operations after an error recovery
3158 * sequence has been completed.
3159 */
3160static void e100_io_resume(struct pci_dev *pdev)
3161{
3162 struct net_device *netdev = pci_get_drvdata(pdev);
3163 struct nic *nic = netdev_priv(netdev);
3164
3165 /* ack any pending wake events, disable PME */
1ca01512 3166 pci_enable_wake(pdev, PCI_D0, 0);
2cc30492
AK
3167
3168 netif_device_attach(netdev);
3169 if (netif_running(netdev)) {
3170 e100_open(netdev);
3171 mod_timer(&nic->watchdog, jiffies);
3172 }
3173}
3174
3646f0e5 3175static const struct pci_error_handlers e100_err_handler = {
2cc30492
AK
3176 .error_detected = e100_io_error_detected,
3177 .slot_reset = e100_io_slot_reset,
3178 .resume = e100_io_resume,
3179};
6bdacb1a 3180
1da177e4
LT
3181static struct pci_driver e100_driver = {
3182 .name = DRV_NAME,
3183 .id_table = e100_id_table,
3184 .probe = e100_probe,
9f9a12f8 3185 .remove = e100_remove,
e8e82b76 3186#ifdef CONFIG_PM
975b366a 3187 /* Power Management hooks */
1da177e4
LT
3188 .suspend = e100_suspend,
3189 .resume = e100_resume,
3190#endif
05479938 3191 .shutdown = e100_shutdown,
2cc30492 3192 .err_handler = &e100_err_handler,
1da177e4
LT
3193};
3194
3195static int __init e100_init_module(void)
3196{
f26251eb 3197 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
fa05e1ad
JP
3198 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3199 pr_info("%s\n", DRV_COPYRIGHT);
1da177e4 3200 }
29917620 3201 return pci_register_driver(&e100_driver);
1da177e4
LT
3202}
3203
3204static void __exit e100_cleanup_module(void)
3205{
3206 pci_unregister_driver(&e100_driver);
3207}
3208
3209module_init(e100_init_module);
3210module_exit(e100_cleanup_module);