ixgbe: Support RX-ALL feature flag.
[deliverable/linux.git] / drivers/net/ethernet/intel/e100.c
/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

28
29/*
30 * e100.c: Intel(R) PRO/100 ethernet driver
31 *
32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
35 *
36 * References:
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
40 *
41 *
42 * Theory of Operation
43 *
44 * I. General
45 *
46 * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at 33Mhz PCI clock rate.
54 *
55 * II. Driver Operation
56 *
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
63 *
64 * 8255x is highly MII-compliant and all access to the PHY go
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
67 * devices.
68 *
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
72 *
73 * III. Transmit
74 *
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
79 * controller, and the controller can be restarted by issue a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
82 *
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
86 *
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB clean
90 * up happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of number of free CB resources available.
92 *
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
95 * with 00h.
96 *
0a0863af 97 * IV. Receive
1da177e4
LT
98 *
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
108 *
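 *	(In practice that means a pci_dma_sync_single_for_cpu() on the RFD
 *	before the CPU reads its completion status, and a matching
 *	..._for_device() sync after the CPU writes it; see the Rx routines
 *	later in this file.)
 *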
 *	In order to keep updates to the RFD link field from colliding with
 *	hardware writes to mark packets complete, we use the feature that
 *	hardware will not write to a size 0 descriptor and mark the previous
 *	packet as end-of-list (EL).  After updating the link, we remove EL
 *	and only then restore the size such that hardware may use the
 *	previous-to-end RFD.
 *
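 *	A rough sketch of that ordering (illustrative only, not the
 *	driver's literal code; "prev" is the old end-of-list RFD, "new"
 *	the freshly allocated one, RFD_BUF_LEN a nominal buffer size):
 *
 *		new->size = 0;                       // h/w won't touch size-0
 *		prev->link = cpu_to_le32(new_dma);   // safe: prev carries EL
 *		prev->command &= ~cpu_to_le16(cb_el); // open the list...
 *		new->size = cpu_to_le16(RFD_BUF_LEN); // ...then arm the RFD
 *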
 *	Under typical operation, the receive unit (RU) is started once,
 *	and the controller happily fills RFDs as frames arrive.  If
 *	replacement RFDs cannot be allocated, or the RU goes non-active,
 *	the RU must be restarted.  Frame arrival generates an interrupt,
 *	and Rx indication and re-allocation happen in the same context,
 *	therefore no locking is required.  A software-generated interrupt
 *	is generated from the watchdog to recover from a failed allocation
 *	scenario where all Rx resources have been indicated and none
 *	replaced.
 *
 *	V.   Miscellaneous
 *
 *	VLAN offloading of tagging, stripping and filtering is not
 *	supported, but the driver will accommodate the extra 4-byte VLAN
 *	tag for processing by upper layers.  Tx/Rx Checksum offloading is
 *	not supported.  Tx Scatter/Gather is not supported.  Jumbo Frames
 *	are not supported (hardware limitation).
 *
 *	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 *	Thanks to JC (jchapman@katalix.com) for helping with
 *	testing/troubleshooting the development driver.
 *
 *	TODO:
 *	o several entry points race with dev->close
 *	o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 *	FIXES:
 *	2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 *	- Stratus87247: protect MDI control register manipulations
 *	2009/06/01 - Andreas Mohr <andi at lisas dot de>
 *	- add clean lowlevel I/O emulation for cards with MII-lacking PHYs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
#include <asm/unaligned.h>


#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");

#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_no_res = 0x08,
	rus_ready  = 0x10,
	rus_mask   = 0x3C,
};

enum ru_state {
	RU_SUSPENDED     = 0,
	RU_RUNNING       = 1,
	RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

/**
 * cb_command - Command Block flags
 * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
 */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_tx_nc  = 0x0010,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};

struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
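/* Example: on a little-endian build, X(byte_count:6, pad0:2) below expands
 * to "byte_count:6, pad0:2", placing byte_count in the low-order bits; on
 * a big-endian build the pair is swapped, so the in-memory byte layout the
 * device reads is identical either way. */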
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;
	__le16 eeprom[256];
	spinlock_t mdio_lock;
	const struct firmware *fw;
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for (j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for (i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
};

/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for (i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if (!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
};

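/* Example of the addr_len discovery above (an illustration, not driver
 * state): a 64-word EEPROM takes 6 address bits, so when a read is clocked
 * out with the assumed 8-bit address, the dummy 0 on EEDO shows up two
 * bit-times early (i == 18) and addr_len is corrected to 8 - (18 - 16) = 6. */
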
/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}

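/* Checksum example (illustrative numbers): if the first wc-1 words sum to
 * 0x4241, the last word must hold 0xBABA - 0x4241 = 0x7879 so that the
 * 16-bit sum over all wc words comes out to 0xBABA. */
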
/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

#define E100_WAIT_SCB_TIMEOUT	20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST	20    /* delay like the old code */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}

static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}

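/* Callers pair e100_exec_cb() with a prepare callback that fills in the
 * command block before it is handed to the CU, e.g.
 * e100_exec_cb(nic, NULL, e100_configure) to queue a configure command
 * (see e100_hw_init() below for the typical sequence). */
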
static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	struct nic *nic = netdev_priv(netdev);

	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
}

/* the standard mdio_ctrl() function for usual MII-compliant hardware */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;


	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}

/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
				 u32 addr,
				 u32 dir,
				 u32 reg,
				 u16 data)
{
	if ((reg == MII_BMCR) && (dir == mdi_write)) {
		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
							MII_ADVERTISE);

			/*
			 * Workaround Si issue where sometimes the part will not
			 * autoneg to 100Mbps even when advertised.
			 */
			if (advert & ADVERTISE_100FULL)
				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
			else if (advert & ADVERTISE_100HALF)
				data |= BMCR_SPEED100;
		}
	}
	return mdio_ctrl_hw(nic, addr, dir, reg, data);
}

/* Fully software-emulated mdio_ctrl() function for cards without
 * MII-compliant PHYs.
 * For now, this is mainly geared towards 80c24 support; in case of further
 * requirements for other types (i82503, ...?) either extend this mechanism
 * or split it, whichever is cleaner.
 */
static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
				      u32 addr,
				      u32 dir,
				      u32 reg,
				      u16 data)
{
	/* might need to allocate a netdev_priv'ed register array eventually
	 * to be able to record state changes, but for now
	 * some fully hardcoded register handling ought to be ok I guess. */

	if (dir == mdi_read) {
		switch (reg) {
		case MII_BMCR:
			/* Auto-negotiation, right? */
			return  BMCR_ANENABLE |
				BMCR_FULLDPLX;
		case MII_BMSR:
			return	BMSR_LSTATUS /* for mii_link_ok() */ |
				BMSR_ANEGCAPABLE |
				BMSR_10FULL;
		case MII_ADVERTISE:
			/* 80c24 is a "combo card" PHY, right? */
			return	ADVERTISE_10HALF |
				ADVERTISE_10FULL;
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	} else {
		switch (reg) {
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	}
}

static inline int e100_phy_supports_mii(struct nic *nic)
{
	/* for now, just check it by comparing whether we
	 * are using MII software emulation.
	 */
	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
}

static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;	/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;	/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1;		/* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to 0xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is 0x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value 0xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */

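/* Example of the BUNDLESMALL mask (an assumption about how the ucode
 * applies it): with the default 0xFF80, a 64-byte ACK gives
 * 64 & 0xFF80 == 0 and interrupts immediately, while a 1500-byte data
 * frame gives a non-zero result and is bundled. */
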
/* Initialize firmware */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision */
	if (nic->mac == mac_82559_D101M)
		fw_name = FIRMWARE_D101M;
	else if (nic->mac == mac_82559_D101S)
		fw_name = FIRMWARE_D101S;
	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
		fw_name = FIRMWARE_D102E;
	else /* No ucode on other devices */
		return NULL;

	/* If the firmware has not previously been loaded, request a pointer
	 * to it.  If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		netif_err(nic, probe, nic->netdev,
			  "Failed to load firmware \"%s\": %d\n",
			  fw_name, err);
		return ERR_PTR(err);
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use.  Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}

static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
}

static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}

static int e100_phy_check_without_mii(struct nic *nic)
{
	u8 phy_type;
	int without_mii;

	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;

	switch (phy_type) {
	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
	case I82503: /* Non-MII PHY; UNTESTED! */
	case S80C24: /* Non-MII PHY; tested and working */
		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
		 * doesn't have a programming interface of any sort.  The
		 * media is sensed automatically based on how the link partner
		 * is configured.  This is, in essence, manual configuration.
		 */
		netif_info(nic, probe, nic->netdev,
			   "found MII-less i82503 or 80c24 or other PHY\n");

		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */

		/* these might be needed for certain MII-less cards...
		 * nic->flags |= ich;
		 * nic->flags |= ich_10h_workaround; */

		without_mii = 1;
		break;
	default:
		without_mii = 0;
		break;
	}
	return without_mii;
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}

static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct netdev_hw_addr *ha;
	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == count)
			break;
		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
			ETH_ALEN);
	}
}

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "mc_count=%d, flags=0x%04X\n",
		     netdev_mc_count(netdev), netdev->flags);

	if (netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if (netdev->flags & IFF_ALLMULTI ||
		netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}

1602static void e100_update_stats(struct nic *nic)
1603{
09f75cd7
JG
1604 struct net_device *dev = nic->netdev;
1605 struct net_device_stats *ns = &dev->stats;
1da177e4 1606 struct stats *s = &nic->mem->stats;
aaf918ba
AV
1607 __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
1608 (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
1da177e4
LT
1609 &s->complete;
1610
1611 /* Device's stats reporting may take several microseconds to
0a0863af 1612 * complete, so we're always waiting for results of the
1da177e4
LT
1613 * previous command. */
1614
f26251eb 1615 if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
1da177e4
LT
1616 *complete = 0;
1617 nic->tx_frames = le32_to_cpu(s->tx_good_frames);
1618 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
1619 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
1620 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
1621 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
1622 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
1623 ns->collisions += nic->tx_collisions;
1624 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1625 le32_to_cpu(s->tx_lost_crs);
d24d65ed
BG
1626 nic->rx_short_frame_errors +=
1627 le32_to_cpu(s->rx_short_frame_errors);
1628 ns->rx_length_errors = nic->rx_short_frame_errors +
1da177e4
LT
1629 nic->rx_over_length_errors;
1630 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1631 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1632 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1633 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
ecf7130b 1634 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
1da177e4
LT
1635 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1636 le32_to_cpu(s->rx_alignment_errors) +
1637 le32_to_cpu(s->rx_short_frame_errors) +
1638 le32_to_cpu(s->rx_cdt_errors);
1639 nic->tx_deferred += le32_to_cpu(s->tx_deferred);
1640 nic->tx_single_collisions +=
1641 le32_to_cpu(s->tx_single_collisions);
1642 nic->tx_multiple_collisions +=
1643 le32_to_cpu(s->tx_multiple_collisions);
f26251eb 1644 if (nic->mac >= mac_82558_D101_A4) {
1da177e4
LT
1645 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
1646 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
1647 nic->rx_fc_unsupported +=
1648 le32_to_cpu(s->fc_rcv_unsupported);
f26251eb 1649 if (nic->mac >= mac_82559_D101M) {
1da177e4
LT
1650 nic->tx_tco_frames +=
1651 le16_to_cpu(s->xmt_tco_frames);
1652 nic->rx_tco_frames +=
1653 le16_to_cpu(s->rcv_tco_frames);
1654 }
1655 }
1656 }
1657
05479938 1658
f26251eb 1659 if (e100_exec_cmd(nic, cuc_dump_reset, 0))
fa05e1ad
JP
1660 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1661 "exec cuc_dump_reset failed\n");
1da177e4
LT
1662}
1663
1664static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1665{
1666 /* Adjust inter-frame-spacing (IFS) between two transmits if
1667 * we're getting collisions on a half-duplex connection. */
1668
f26251eb 1669 if (duplex == DUPLEX_HALF) {
1da177e4
LT
1670 u32 prev = nic->adaptive_ifs;
1671 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1672
f26251eb 1673 if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1da177e4 1674 (nic->tx_frames > min_frames)) {
f26251eb 1675 if (nic->adaptive_ifs < 60)
1da177e4
LT
1676 nic->adaptive_ifs += 5;
1677 } else if (nic->tx_frames < min_frames) {
f26251eb 1678 if (nic->adaptive_ifs >= 5)
1da177e4
LT
1679 nic->adaptive_ifs -= 5;
1680 }
f26251eb 1681 if (nic->adaptive_ifs != prev)
1da177e4
LT
1682 e100_exec_cb(nic, NULL, e100_configure);
1683 }
1684}
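/*
 * Worked example for the heuristic above (numbers read straight from the
 * code, not from the 8255x datasheet): at 100 Mbps half-duplex min_frames
 * is 1000, so with e.g. 2000 frames and 80 collisions in the last stats
 * interval, 2000/32 = 62 < 80 and the IFS is widened by 5 ticks, capped
 * at 60; once traffic falls below 1000 frames per interval it narrows
 * again in 5-tick steps.  Widening the gap trades a little throughput
 * for fewer collisions.
 */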

static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	u32 speed;

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}

static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;

	/*
	 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
	 * testing, ie sending frames with bad CRC.
	 */
	if (unlikely(skb->no_fcs))
		cb->command |= __constant_cpu_to_le16(cb_tx_nc);
	else
		cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);

	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
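/*
 * Note on the cb_i test above: (cbs_avail & ~15) == cbs_avail holds
 * exactly when cbs_avail is a multiple of 16, so a Tx-complete interrupt
 * is requested on roughly every 16th queued frame under steady traffic;
 * the remaining completions are reaped opportunistically from the NAPI
 * poll loop, keeping interrupt load at about 1/16th of the frame rate.
 */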

static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if (nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if (e100_exec_cmd(nic, cuc_nop, 0))
			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
				     "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch (err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		rmb(); /* read skb after status */
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			     cb->status);

		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}

static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}

static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
				  &nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;
	memset(nic->cbs, 0, count * sizeof(struct cb));

	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}

static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
	if (!nic->rxs) return;
	if (RU_SUSPENDED != nic->ru_running) return;

	/* handle init time starts */
	if (!rx) rx = nic->rxs;

	/* (Re)start RU if suspended or idle and RFA is non-NULL */
	if (rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}

#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
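/*
 * Size sketch for RFD_BUF_LEN, assuming the usual values of these
 * constants (a 16-byte struct rfd, VLAN_ETH_FRAME_LEN of 1518, and a
 * 4-byte FCS): each receive buffer is on the order of 1538 bytes, enough
 * for a max-size VLAN-tagged frame plus its CRC when NETIF_F_RXFCS
 * delivery is enabled.
 */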
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}

static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;
	u16 fcs_pad = 0;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
		     "status=0x%04X\n", rfd_status);
	rmb(); /* read size after status bit */

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size */
	if (unlikely(dev->features & NETIF_F_RXFCS))
		fcs_pad = 4;
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

		if (ioread8(&nic->csr->scb.status) & rus_no_res)
			nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	/* If we are receiving all frames, then don't bother
	 * checking for errors.
	 */
	if (unlikely(dev->features & NETIF_F_RXALL)) {
		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
			/* Received oversized frame, but keep it. */
			nic->rx_over_length_errors++;
		goto process_skb;
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
process_skb:
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (actual_size - fcs_pad);
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}
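/*
 * Lifecycle of one receive, as implemented above: the chip DMA-writes the
 * frame just past the struct rfd header inside the skb and sets
 * cb_complete; the driver then unmaps the buffer, strips the RFD with
 * skb_reserve() and indicates the frame.  With NETIF_F_RXALL, frames the
 * hardware flagged bad (no cb_ok) are still delivered; with NETIF_F_RXFCS
 * the trailing CRC stays in the data but fcs_pad keeps it out of the
 * rx_bytes accounting.
 */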

static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		// ack the rnr?
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}

static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if (nic->rxs) {
		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if (rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}

static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer without
	 * worrying about hardware touching it.
	 * We set the size to 0 to prevent hardware from touching this buffer.
	 * When the hardware hits the before last buffer with el-bit and size
	 * of 0, it will RNR interrupt, the RU will go into the No Resources
	 * state.  It will not complete nor write to this buffer. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
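/*
 * Invariant kept by the ring code above: there is always exactly one
 * stopping point -- the RFD two entries before the newest, marked with
 * cb_el and size 0.  The receive unit halts there in the No Resources
 * state instead of wrapping onto buffers the driver still owns, and
 * e100_rx_clean() migrates the stopping point forward as it refills.
 */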

static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if (likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}

static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		e100_enable_irq(nic);
	}

	return work_done;
}
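/*
 * NAPI contract as used here: e100_intr() masks the device interrupt and
 * schedules the poll; e100_poll() then reaps Rx and Tx with the interrupt
 * still masked.  Returning work_done == budget keeps the device on the
 * poll list, while work_done < budget re-arms interrupts through
 * napi_complete() + e100_enable_irq().
 */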

#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif

static int e100_set_mac_address(struct net_device *netdev, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);

	return 0;
}

static int e100_change_mtu(struct net_device *netdev, int new_mtu)
{
	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	netdev->mtu = new_mtu;
	return 0;
}
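/*
 * With the usual definitions of ETH_ZLEN (60) and ETH_DATA_LEN (1500),
 * the check above pins the MTU to the standard Ethernet range; the 8255x
 * has no jumbo-frame support, so e.g. "ip link set eth0 mtu 1501" fails
 * here with -EINVAL.
 */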

static int e100_asf(struct nic *nic)
{
	/* ASF can be enabled from eeprom */
	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
}

static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}

static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}

static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}

static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));

	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}

static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}

#define MII_LED_CONTROL		0x1B
#define E100_82552_LED_OVERRIDE	0x19
#define E100_82552_LED_ON	0x000F /* LEDTX and LED_RX both on */
#define E100_82552_LED_OFF	0x000A /* LEDTX and LED_RX both off */

static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}

static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}

static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev),
		sizeof(info->bus_info));
}

#define E100_PHY_REGS 0x1C
static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
}

static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
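/*
 * Layout of the blob built above (dumped raw by "ethtool -d ethX"):
 * buff[0] packs the SCB cmd_hi/cmd_lo bytes and the 16-bit status word,
 * buff[1]..buff[1 + E100_PHY_REGS] hold MII registers 0x1C down to 0,
 * and the device's statistical dump buffer is appended after that.
 */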

static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);
	wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
}

static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
		return -EOPNOTSUPP;

	if (wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);

	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}

static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}

static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}

static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}

static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}

static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}

#define E100_EEPROM_MAGIC	0x1234
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}

static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}

static void e100_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	ring->rx_max_pending = rfds->max;
	ring->tx_max_pending = cbs->max;
	ring->rx_pending = rfds->count;
	ring->tx_pending = cbs->count;
}

static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
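/*
 * Usage sketch with stock ethtool (no driver-specific tooling assumed):
 *
 *   ethtool -g eth0                # reads the limits via e100_get_ringparam
 *   ethtool -G eth0 rx 256 tx 128  # requested counts are clamped to
 *                                  # [min, max] by the code above
 *
 * Resizing a running interface bounces it through e100_down()/e100_up(),
 * so expect a brief link drop.
 */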

static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)",
	"Eeprom test (on/offline)",
	"Self test (offline)",
	"Mac loopback (offline)",
	"Phy loopback (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)

static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	msleep_interruptible(4 * 1000);
}
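/*
 * "ethtool -t eth0 offline" lands here: data[0] and data[1] are the
 * always-run link and EEPROM checks, while data[2..4] (self test, MAC
 * loopback, PHY loopback) only run offline.  Any non-zero slot marks a
 * failure and raises ETH_TEST_FL_FAILED in the reported flags.
 */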

static int e100_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct nic *nic = netdev_priv(netdev);
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		      MII_LED_CONTROL;
	u16 leds = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;

	case ETHTOOL_ID_ON:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
		break;

	case ETHTOOL_ID_OFF:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
		break;

	case ETHTOOL_ID_INACTIVE:
		break;
	}

	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
	return 0;
}
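/*
 * Reached via "ethtool -p eth0 [seconds]".  Per the set_phys_id
 * convention (a positive return from ETHTOOL_ID_ACTIVE is the requested
 * blink frequency), returning 2 asks the ethtool core to call back with
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF twice a second; ETHTOOL_ID_INACTIVE then
 * writes 0 to hand the LED back to hardware control.
 */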

static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)

static int e100_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return E100_TEST_LEN;
	case ETH_SS_STATS:
		return E100_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}

static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}

static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.set_phys_id		= e100_set_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};

static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}

static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
		&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}

static void e100_free(struct nic *nic)
{
	if (nic->mem) {
		pci_free_consistent(nic->pdev, sizeof(struct mem),
			nic->mem, nic->dma_addr);
		nic->mem = NULL;
	}
}

static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	netif_carrier_off(netdev);
	if ((err = e100_up(nic)))
		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
	return err;
}

static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}

static int e100_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct nic *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	e100_exec_cb(nic, NULL, e100_configure);
	return 0;
}
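/*
 * The two offloads handled above map to standard ethtool feature flags,
 * assuming an ethtool new enough to expose them:
 *
 *   ethtool -K eth0 rx-fcs on   # keep the Ethernet CRC on received frames
 *   ethtool -K eth0 rx-all on   # deliver frames the MAC flagged as bad
 *
 * Only NETIF_F_RXFCS and NETIF_F_RXALL require re-running the configure
 * command block; any other feature change is a no-op here.
 */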

static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
	.ndo_set_features	= e100_set_features,
};

static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;

	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->netdev_ops = &e100_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* D100 MAC doesn't allow rx of vlan packets with normal MTU */
	if (nic->mac < mac_82558_D101_A4)
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if ((nic->mac >= mac_82558_D101_A4) &&
	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
		goto err_out_free;
	}
	nic->cbs_pool = pci_pool_create(netdev->name,
			   nic->pdev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
	netif_info(nic, probe, nic->netdev,
		   "addr 0x%llx, irq %d, MAC addr %pM\n",
		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		   pdev->irq, netdev->dev_addr);

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
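/*
 * Probe-order notes: the controller is reset before pci_set_master() so a
 * stale pending interrupt cannot DMA before any handler is registered,
 * and the CB DMA pool is created only after register_netdev() so the pool
 * carries the final "ethN" name.  e100_remove() below unwinds the same
 * resources in reverse order.
 */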

static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#define E100_82552_SMARTSPEED	0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG	0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW	0x0400 /* Auto-negotiate now */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}

static int __e100_power_off(struct pci_dev *pdev, bool wake)
{
	if (wake)
		return pci_prepare_to_sleep(pdev);

	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

#ifdef CONFIG_PM
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	return __e100_power_off(pdev, wake);
}

static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */

static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}

/* ------------------ PCI Error Recovery infrastructure -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}

static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

static struct pci_driver e100_driver = {
	.name =		DRV_NAME,
	.id_table =	e100_id_table,
	.probe =	e100_probe,
	.remove =	__devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =	e100_suspend,
	.resume =	e100_resume,
#endif
	.shutdown =	e100_shutdown,
	.err_handler = &e100_err_handler,
};

static int __init e100_init_module(void)
{
	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
		pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
		pr_info("%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);