net: convert multicast list to list_head
deliverable/linux.git: drivers/net/e100.c
/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/*
 *	e100.c: Intel(R) PRO/100 ethernet driver
 *
 *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
 *	original e100 driver, but better described as a munging of
 *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 *	References:
 *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
 *		Open Source Software Developers Manual,
 *		http://sourceforge.net/projects/e1000
 *
 *
 *	                     Theory of Operation
 *
 *	I.   General
 *
 *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 *	controller family, which includes the 82557, 82558, 82559, 82550,
 *	82551, and 82562 devices.  82558 and greater controllers
 *	integrate the Intel 82555 PHY.  The controllers are used in
 *	server and client network interface cards, as well as in
 *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 *	configurations.  8255x supports a 32-bit linear addressing
 *	mode and operates at a 33MHz PCI clock rate.
 *
 *	II.  Driver Operation
 *
 *	Memory-mapped mode is used exclusively to access the device's
 *	shared-memory structure, the Control/Status Registers (CSR).  All
 *	setup, configuration, and control of the device, including queuing
 *	of Tx, Rx, and configuration commands, is through the CSR.
 *	cmd_lock serializes accesses to the CSR command register.  cb_lock
 *	protects the shared Command Block List (CBL).
 *
 *	8255x is highly MII-compliant and all accesses to the PHY go
 *	through the Management Data Interface (MDI).  Consequently, the
 *	driver leverages the mii.c library shared with other MII-compliant
 *	devices.
 *
 *	Big- and Little-Endian byte order as well as 32- and 64-bit
 *	archs are supported.  Weak-ordered memory and non-cache-coherent
 *	archs are supported.
 *
 *	III. Transmit
 *
 *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 *	together in a fixed-size ring (CBL) thus forming the flexible mode
 *	memory structure.  A TCB marked with the suspend-bit indicates
 *	the end of the ring.  The last TCB processed suspends the
 *	controller, and the controller can be restarted by issuing a CU
 *	resume command to continue from the suspend point, or a CU start
 *	command to start at a given position in the ring.
 *
 *	Non-Tx commands (config, multicast setup, etc.) are linked
 *	into the CBL ring along with Tx commands.  The common structure
 *	used for both Tx and non-Tx commands is the Command Block (CB).
 *
 *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
 *	is the next CB to check for completion; cb_to_send is the first
 *	CB to start on in case of a previous failure to resume.  CB cleanup
 *	happens in interrupt context in response to a CU interrupt.
 *	cbs_avail keeps track of the number of free CB resources available.
 *
 *	Hardware padding of short packets to the minimum packet size is
 *	enabled.  82557 pads with 7Eh, while the later controllers pad
 *	with 00h.
 *
 *	IV.  Receive
 *
 *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
 *	Descriptors (RFD) + data buffer, thus forming the simplified mode
 *	memory structure.  Rx skbs are allocated to contain both the RFD
 *	and the data buffer, but the RFD is pulled off before the skb is
 *	indicated.  The data buffer is aligned such that encapsulated
 *	protocol headers are u32-aligned.  Since the RFD is part of the
 *	mapped shared memory, and completion status is contained within
 *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
 *	view from software and hardware.
 *
 *	In order to keep updates to the RFD link field from colliding with
 *	hardware writes to mark packets complete, we use the feature that
 *	hardware will not write to a size 0 descriptor and mark the previous
 *	packet as end-of-list (EL).  After updating the link, we remove EL
 *	and only then restore the size such that hardware may use the
 *	previous-to-end RFD.
 *
 *	Under typical operation, the receive unit (RU) is started once,
 *	and the controller happily fills RFDs as frames arrive.  If
 *	replacement RFDs cannot be allocated, or the RU goes non-active,
 *	the RU must be restarted.  Frame arrival generates an interrupt,
 *	and Rx indication and re-allocation happen in the same context,
 *	therefore no locking is required.  A software-generated interrupt
 *	is generated from the watchdog to recover from a failed allocation
 *	scenario where all Rx resources have been indicated and none
 *	replaced.
 *
 *	V.   Miscellaneous
 *
 *	VLAN offloading of tagging, stripping and filtering is not
 *	supported, but the driver will accommodate the extra 4-byte VLAN tag
 *	for processing by upper layers.  Tx/Rx checksum offloading is not
 *	supported.  Tx scatter/gather is not supported.  Jumbo frames are
 *	not supported (hardware limitation).
 *
 *	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 *	Thanks to JC (jchapman@katalix.com) for helping with
 *	testing/troubleshooting the development driver.
 *
 *	TODO:
 *	o several entry points race with dev->close
 *	o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 *	FIXES:
 *	2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 *	- Stratus87247: protect MDI control register manipulations
 *	2009/06/01 - Andreas Mohr <andi at lisas dot de>
 *	- add clean lowlevel I/O emulation for cards with MII-lacking PHYs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <asm/unaligned.h>


#define DRV_NAME "e100"
#define DRV_EXT "-NAPI"
#define DRV_VERSION "3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"

#define E100_WATCHDOG_PERIOD (2 * HZ)
#define E100_NAPI_WEIGHT 16

#define FIRMWARE_D101M "e100/d101m_ucode.bin"
#define FIRMWARE_D101S "e100/d101s_ucode.bin"
#define FIRMWARE_D102E "e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");

#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A = 0,
	mac_82557_D100_B = 1,
	mac_82557_D100_C = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M = 8,
	mac_82559_D101S = 9,
	mac_82550_D102 = 12,
	mac_82550_D102_C = 13,
	mac_82551_E = 14,
	mac_82551_F = 15,
	mac_82551_10 = 16,
	mac_unknown = 0xFF,
};

enum phy {
	phy_100a = 0x000003E0,
	phy_100c = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v = 0xd061004d,
	phy_unknown = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_no_res = 0x08,
	rus_ready = 0x10,
	rus_mask = 0x3C,
};

enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING = 1,
	RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
	stat_ack_not_ours = 0x00,
	stat_ack_sw_gen = 0x04,
	stat_ack_rnr = 0x10,
	stat_ack_cu_idle = 0x20,
	stat_ack_frame_rx = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all = 0x01,
	irq_sw_gen = 0x02,
};

enum scb_cmd_lo {
	cuc_nop = 0x00,
	ruc_start = 0x01,
	ruc_load_base = 0x06,
	cuc_start = 0x10,
	cuc_resume = 0x20,
	cuc_dump_addr = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset = 0x0000,
	selftest = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read = 0x06,
	op_ewds = 0x10,
	op_ewen = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix = 0x03,
	eeprom_phy_iface = 0x06,
	eeprom_id = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok = 0x2000,
};

enum cb_command {
	cb_nop = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi = 0x0003,
	cb_tx = 0x0004,
	cb_ucode = 0x0005,
	cb_dump = 0x0006,
	cb_tx_sf = 0x0008,
	cb_cid = 0x1f00,
	cb_i = 0x2000,
	cb_s = 0x4000,
	cb_el = 0x8000,
};

struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
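
/* Illustrative expansion of X() (not used by the driver): for byte 0 of
 * struct config below, u8 X(byte_count:6, pad0:2) becomes
 *
 *	u8 byte_count:6, pad0:2;	// little-endian bitfields
 *	u8 pad0:2, byte_count:6;	// big-endian bitfields
 *
 * Little-endian compilers allocate the first-declared bitfield at the
 * least significant bits, big-endian compilers at the most significant,
 * so the swap keeps byte_count in bits 5:0 on both, matching the layout
 * the 8255x expects. */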
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS 64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE 134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable ____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs ____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock ____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich = (1 << 0),
		promiscuous = (1 << 1),
		multicast_all = (1 << 2),
		wol_magic = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags ____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;
	u16 eeprom_wc;
	__le16 eeprom[256];
	spinlock_t mdio_lock;
	const struct firmware *fw;
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for (j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for (i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
}

/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for (i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if (!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
}

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
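
/* Worked example of the 0xBABA convention (illustrative only): if the
 * first eeprom_wc - 1 words sum to 0x1234, the device stores
 * 0xBABA - 0x1234 = 0xA886 in the last word, so that summing *all* words
 * (mod 2^16) yields exactly 0xBABA on a good image. */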

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}

static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}

static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	struct nic *nic = netdev_priv(netdev);

	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
}

/* the standard mdio_ctrl() function for usual MII-compliant hardware */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;


	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0; /* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}
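
/* Worked example of the MDI control word above (illustrative only):
 * reading PHY register 1 (BMSR) on PHY address 1 builds
 *
 *	(1 << 16) | (1 << 21) | mdi_read = 0x08210000
 *
 * i.e. bits 20:16 carry the register, bits 25:21 the PHY address,
 * bits 27:26 the opcode, and the low 16 bits the write data (or, on
 * completion, the data read back). */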

/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
				 u32 addr,
				 u32 dir,
				 u32 reg,
				 u16 data)
{
	if ((reg == MII_BMCR) && (dir == mdi_write)) {
		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
							MII_ADVERTISE);

			/*
			 * Workaround Si issue where sometimes the part will not
			 * autoneg to 100Mbps even when advertised.
			 */
			if (advert & ADVERTISE_100FULL)
				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
			else if (advert & ADVERTISE_100HALF)
				data |= BMCR_SPEED100;
		}
	}
	return mdio_ctrl_hw(nic, addr, dir, reg, data);
}

/* Fully software-emulated mdio_ctrl() function for cards without
 * MII-compliant PHYs.
 * For now, this is mainly geared towards 80c24 support; in case of further
 * requirements for other types (i82503, ...?) either extend this mechanism
 * or split it, whichever is cleaner.
 */
static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
				      u32 addr,
				      u32 dir,
				      u32 reg,
				      u16 data)
{
	/* might need to allocate a netdev_priv'ed register array eventually
	 * to be able to record state changes, but for now
	 * some fully hardcoded register handling ought to be ok I guess. */

	if (dir == mdi_read) {
		switch (reg) {
		case MII_BMCR:
			/* Auto-negotiation, right? */
			return  BMCR_ANENABLE |
				BMCR_FULLDPLX;
		case MII_BMSR:
			return	BMSR_LSTATUS /* for mii_link_ok() */ |
				BMSR_ANEGCAPABLE |
				BMSR_10FULL;
		case MII_ADVERTISE:
			/* 80c24 is a "combo card" PHY, right? */
			return	ADVERTISE_10HALF |
				ADVERTISE_10FULL;
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	} else {
		switch (reg) {
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	}
}

static inline int e100_phy_supports_mii(struct nic *nic)
{
	/* for now, just check it by comparing whether we
	   are using MII software emulation.
	 */
	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
}

static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to 0xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is 0x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value 0xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */
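
/* Worked example of the BUNDLESMALL mask above (illustrative only): with
 * the default mask 0xFF80, a frame triggers an immediate interrupt when
 * (size & mask) == 0:
 *
 *	 100 & 0xFF80 == 0      -> 100-byte frame (e.g. a TCP ACK)
 *	                           interrupts immediately
 *	1514 & 0xFF80 == 0x0580 -> full-size frame is bundled
 *
 * 0xFFFF never yields 0 for a non-empty frame (feature off), while
 * 0xF800 yields 0 for anything under 2048 bytes (interrupt on every
 * standard frame). */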

/* Initialize firmware */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision */
	if (nic->mac == mac_82559_D101M)
		fw_name = FIRMWARE_D101M;
	else if (nic->mac == mac_82559_D101S)
		fw_name = FIRMWARE_D101S;
	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
		fw_name = FIRMWARE_D102E;
	else /* No ucode on other devices */
		return NULL;

	/* If the firmware has not previously been loaded, request a pointer
	 * to it.  If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		netif_err(nic, probe, nic->netdev,
			  "Failed to load firmware \"%s\": %d\n",
			  fw_name, err);
		return ERR_PTR(err);
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use.  Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}

static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
}

static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}

static int e100_phy_check_without_mii(struct nic *nic)
{
	u8 phy_type;
	int without_mii;

	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;

	switch (phy_type) {
	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
	case I82503: /* Non-MII PHY; UNTESTED! */
	case S80C24: /* Non-MII PHY; tested and working */
		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
		 * doesn't have a programming interface of any sort.  The
		 * media is sensed automatically based on how the link partner
		 * is configured.  This is, in essence, manual configuration.
		 */
		netif_info(nic, probe, nic->netdev,
			   "found MII-less i82503 or 80c24 or other PHY\n");

		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */

		/* these might be needed for certain MII-less cards...
		 * nic->flags |= ich;
		 * nic->flags |= ich_10h_workaround; */

		without_mii = 1;
		break;
	default:
		without_mii = 0;
		break;
	}
	return without_mii;
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}

static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct netdev_hw_addr *ha;
	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == count)
			break;
		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
			ETH_ALEN);
	}
}
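
/* Illustrative sketch (not part of the driver): with the multicast list
 * now kept as a list_head of netdev_hw_addr entries, a setup command for
 * two addresses lays out cb->u.multi as
 *
 *	count = cpu_to_le16(2 * ETH_ALEN);	// 12 bytes of addresses
 *	addr  = { a0[0..5], a1[0..5] };		// packed back-to-back
 *
 * which is exactly what the netdev_for_each_mc_addr() loop above builds. */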

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "mc_count=%d, flags=0x%04X\n",
		     netdev_mc_count(netdev), netdev->flags);

	if (netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if (netdev->flags & IFF_ALLMULTI ||
		netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}

static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}
1644
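/* Sketch of the dump-counters handshake above, reduced to its essence:
 * the controller DMAs a block of counters and writes a magic "done"
 * marker as the final word; the driver only trusts the block once the
 * marker is seen, clears it, and re-arms the next dump.  This helper is
 * illustrative only and is not part of the driver. */
static bool e100_example_stats_ready(__le32 *marker)
{
	if (*marker != cpu_to_le32(cuc_dump_reset_complete))
		return false;	/* previous dump still in flight */
	*marker = 0;		/* re-arm for the next dump */
	return true;
}
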
static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
	/* Adjust inter-frame-spacing (IFS) between two transmits if
	 * we're getting collisions on a half-duplex connection. */

	if (duplex == DUPLEX_HALF) {
		u32 prev = nic->adaptive_ifs;
		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
		    (nic->tx_frames > min_frames)) {
			if (nic->adaptive_ifs < 60)
				nic->adaptive_ifs += 5;
		} else if (nic->tx_frames < min_frames) {
			if (nic->adaptive_ifs >= 5)
				nic->adaptive_ifs -= 5;
		}
		if (nic->adaptive_ifs != prev)
			e100_exec_cb(nic, NULL, e100_configure);
	}
}

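/* The adaptive-IFS policy above as a pure function, for reference: back
 * off (larger inter-frame spacing) when more than roughly 3% of frames
 * collide (tx_frames / 32 < tx_collisions), relax again when traffic is
 * light, clamped to 0..60 in steps of 5.  Illustrative only. */
static u32 e100_example_next_ifs(u32 ifs, u32 frames, u32 collisions,
				 u32 min_frames)
{
	if (frames / 32 < collisions && frames > min_frames) {
		if (ifs < 60)
			ifs += 5;	/* >~3% collision rate: back off */
	} else if (frames < min_frames) {
		if (ifs >= 5)
			ifs -= 5;	/* light traffic: tighten spacing */
	}
	return ifs;
}
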
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    cmd.speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,
		 &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && cmd.speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}

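/* The register poke above, isolated: cmd_hi shares bits between the
 * interrupt-mask and software-interrupt-trigger functions, so the
 * read/modify/write must happen under cmd_lock or a concurrent
 * e100_disable_irq() could be silently undone.  Minimal sketch of the
 * safe sequence (illustrative, not called by the driver): */
static void e100_example_sw_irq(struct nic *nic)
{
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,
		 &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);		/* post the write before unlocking */
	spin_unlock_irq(&nic->cmd_lock);
}
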
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}

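/* Aside on the coalescing test above: (x & ~15) == x is true exactly
 * when the low four bits of x are zero, i.e. when x is a multiple of
 * 16 -- so a completion interrupt is requested on every 16th queued
 * frame.  An equivalent, perhaps clearer, spelling: */
static inline bool e100_example_every_16th(unsigned int cbs_avail)
{
	return (cbs_avail % 16) == 0;
}
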
static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if (nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if (e100_exec_cmd(nic, cuc_nop, 0))
			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
				     "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch (err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	netdev->trans_start = jiffies;
	return NETDEV_TX_OK;
}

static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void *)cb - (void *)nic->cbs) / sizeof(struct cb)),
			     cb->status);

		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}

static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}

static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
				  &nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;
	memset(nic->cbs, 0, count * sizeof(struct cb));

	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}

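/* Shape of the CB ring built above, shown on its own: entry i points
 * forward to (i + 1) mod count and back to (i - 1) mod count, and the
 * hardware "link" field carries the DMA address of the next entry so
 * the controller walks the same circle the CPU does.  Illustrative
 * sketch, not called by the driver: */
static void e100_example_ring_wire(struct cb *cbs, dma_addr_t base,
				   unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		cbs[i].next = &cbs[(i + 1) % count];
		cbs[i].prev = &cbs[(i + count - 1) % count];
		cbs[i].dma_addr = base + i * sizeof(struct cb);
		cbs[i].link = cpu_to_le32(base +
			((i + 1) % count) * sizeof(struct cb));
	}
}
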
static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
	if (!nic->rxs)
		return;
	if (RU_SUSPENDED != nic->ru_running)
		return;

	/* handle init time starts */
	if (!rx)
		rx = nic->rxs;

	/* (Re)start RU if suspended or idle and RFA is non-NULL */
	if (rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}

#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}

static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
		     "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))
			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {
		if (ioread8(&nic->csr->scb.status) & rus_no_res)
			nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += actual_size;
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}

static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		/* ack the rnr? */
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}

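/* Picture of the RFA maintained above (comment only): S is the "before
 * last" RFD carrying the el bit and size 0; the RU stops there with an
 * RNR rather than running off the end while the driver edits the tail
 * behind it.
 *
 *   head -> [RFD] -> [RFD] -> ... -> [S: el, size=0] -> [last] -+
 *     ^                                                         |
 *     +---------------------------------------------------------+
 *
 * Refill moves S forward: set el on the new before-last RFD first, then
 * clear el on the old one and restore its size, syncing each write so
 * the device never observes a window with no stopping point. */
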
static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if (nic->rxs) {
		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if (rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}

static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer without
	 * worrying about hardware touching it.
	 * We set the size to 0 to prevent hardware from touching this buffer.
	 * When the hardware hits the before last buffer with el-bit and size
	 * of 0, it will RNR interrupt, the RU will go into the No Resources
	 * state.  It will not complete nor write to this buffer. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}

static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	    stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if (likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}

static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		e100_enable_irq(nic);
	}

	return work_done;
}

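/* How the NAPI pieces above fit together (comment only; the real call
 * sites are e100_probe() below and e100_intr() above): the ISR acks the
 * hardware status, masks the device interrupt, and defers the rest to
 * the poll routine, which consumes at most "budget" packets and only
 * re-enables the interrupt when the quota was not exhausted:
 *
 *	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
 *	...
 *	if (napi_schedule_prep(&nic->napi)) {
 *		e100_disable_irq(nic);
 *		__napi_schedule(&nic->napi);
 *	}
 *
 * Returning a full "budget" from the poll routine keeps the softirq in
 * polled mode. */
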
#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif

static int e100_set_mac_address(struct net_device *netdev, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);

	return 0;
}

static int e100_change_mtu(struct net_device *netdev, int new_mtu)
{
	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	netdev->mtu = new_mtu;
	return 0;
}

static int e100_asf(struct nic *nic)
{
	/* ASF can be enabled from eeprom */
	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
		(nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
		!(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
		((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
}

static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}

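/* The unwind ladder in e100_up() above follows the usual kernel shape:
 * acquire resources in order and, on failure, jump to the label that
 * releases everything acquired so far, in reverse order.  Skeleton for
 * reference (names hypothetical):
 *
 *	if ((err = get_A()))  return err;
 *	if ((err = get_B()))  goto err_put_A;
 *	if ((err = get_C()))  goto err_put_B;
 *	return 0;
 * err_put_B:
 *	put_B();
 * err_put_A:
 *	put_A();
 *	return err;
 */
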
static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}

static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}

static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
	e100_down(netdev_priv(netdev));
	e100_up(netdev_priv(netdev));
}

static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}

#define MII_LED_CONTROL		0x1B
#define E100_82552_LED_OVERRIDE	0x19
#define E100_82552_LED_ON	0x000F /* LEDTX and LED_RX both on */
#define E100_82552_LED_OFF	0x000A /* LEDTX and LED_RX both off */
static void e100_blink_led(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = MII_LED_CONTROL;

	if (nic->phy == phy_82552_v) {
		led_reg = E100_82552_LED_OVERRIDE;

		nic->leds = (nic->leds == E100_82552_LED_ON) ?
			    E100_82552_LED_OFF : E100_82552_LED_ON;
	} else {
		nic->leds = (nic->leds & led_on) ? led_off :
			    (nic->mac < mac_82559_D101M) ? led_on_557 :
			    led_on_559;
	}
	mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
}

static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}

static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}

static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(nic->pdev));
}

#define E100_PHY_REGS 0x1C
static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
}

static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}

static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);
	wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
}

static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
		return -EOPNOTSUPP;

	if (wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);

	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}

static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}

static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}

static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}

static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}

static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}

#define E100_EEPROM_MAGIC 0x1234
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}

static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}

static void e100_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	ring->rx_max_pending = rfds->max;
	ring->tx_max_pending = cbs->max;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rfds->count;
	ring->tx_pending = cbs->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}

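/* The max()-then-min() pair above is just a clamp to [min, max]; with
 * the clamp() helper from <linux/kernel.h> the same bound reads as
 * below.  Illustrative only, not called by the driver: */
static inline u32 e100_example_clamp_ring(u32 requested,
					  const struct param_range *r)
{
	return clamp(requested, r->min, r->max);
}
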
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)",
	"Eeprom test (on/offline)",
	"Self test (offline)",
	"Mac loopback (offline)",
	"Phy loopback (offline)",
};
#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)

static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	msleep_interruptible(4 * 1000);
}

static int e100_phys_id(struct net_device *netdev, u32 data)
{
	struct nic *nic = netdev_priv(netdev);
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		      MII_LED_CONTROL;

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
	mod_timer(&nic->blink_timer, jiffies);
	msleep_interruptible(data * 1000);
	del_timer_sync(&nic->blink_timer);
	mdio_write(netdev, nic->mii.phy_id, led_reg, 0);

	return 0;
}

static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)

static int e100_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return E100_TEST_LEN;
	case ETH_SS_STATS:
		return E100_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}

static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}

static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};

static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}

static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
		&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}

static void e100_free(struct nic *nic)
{
	if (nic->mem) {
		pci_free_consistent(nic->pdev, sizeof(struct mem),
			nic->mem, nic->dma_addr);
		nic->mem = NULL;
	}
}

static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	netif_carrier_off(netdev);
	if ((err = e100_up(nic)))
		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
	return err;
}

static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}

static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
};

static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
		if (((1 << debug) - 1) & NETIF_MSG_PROBE)
			pr_err("Etherdev alloc failed, aborting\n");
		return -ENOMEM;
	}

	netdev->netdev_ops = &e100_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;
	init_timer(&nic->blink_timer);
	nic->blink_timer.function = e100_blink_led;
	nic->blink_timer.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if ((nic->mac >= mac_82558_D101_A4) &&
	    (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
		goto err_out_free;
	}
	nic->cbs_pool = pci_pool_create(netdev->name,
			   nic->pdev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
	netif_info(nic, probe, nic->netdev,
		   "addr 0x%llx, irq %d, MAC addr %pM\n",
		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		   pdev->irq, netdev->dev_addr);

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}

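/* Aside on the "(1 << debug) - 1" expression used in e100_probe() above
 * and in e100_init_module() below: with debug = N it yields a mask with
 * the low N bits set, enabling the first N NETIF_MSG_* categories; e.g.
 * debug = 3 gives 0x7, i.e. NETIF_MSG_DRV | NETIF_MSG_PROBE |
 * NETIF_MSG_LINK.  (Comment only, describing the existing idiom.) */
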
static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#define E100_82552_SMARTSPEED	0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG	0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW	0x0400 /* Auto-negotiate now */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
						   E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
				   E100_82552_SMARTSPEED, smartspeed |
				   E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}

static int __e100_power_off(struct pci_dev *pdev, bool wake)
{
	if (wake)
		return pci_prepare_to_sleep(pdev);

	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

#ifdef CONFIG_PM
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	return __e100_power_off(pdev, wake);
}

static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
					   E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
			   E100_82552_SMARTSPEED,
			   smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */

static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}

/* ------------------ PCI Error Recovery infrastructure -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}

static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

static struct pci_driver e100_driver = {
	.name =		DRV_NAME,
	.id_table =	e100_id_table,
	.probe =	e100_probe,
	.remove =	__devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =	e100_suspend,
	.resume =	e100_resume,
#endif
	.shutdown =	e100_shutdown,
	.err_handler = &e100_err_handler,
};

static int __init e100_init_module(void)
{
	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
		pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
		pr_info("%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);