/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/*
 *	e100.c: Intel(R) PRO/100 ethernet driver
 *
 *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
 *	original e100 driver, but better described as a munging of
 *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 *	References:
 *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
 *		Open Source Software Developers Manual,
 *		http://sourceforge.net/projects/e1000
 *
 *
 *	                     Theory of Operation
 *
 *	I.   General
 *
 *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 *	controller family, which includes the 82557, 82558, 82559, 82550,
 *	82551, and 82562 devices.  82558 and greater controllers
 *	integrate the Intel 82555 PHY.  The controllers are used in
 *	server and client network interface cards, as well as in
 *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 *	configurations.  8255x supports a 32-bit linear addressing
 *	mode and operates at a 33 MHz PCI clock rate.
 *
 *	II.  Driver Operation
 *
 *	Memory-mapped mode is used exclusively to access the device's
 *	shared-memory structure, the Control/Status Registers (CSR).  All
 *	setup, configuration, and control of the device, including queuing
 *	of Tx, Rx, and configuration commands, is through the CSR.
 *	cmd_lock serializes accesses to the CSR command register.  cb_lock
 *	protects the shared Command Block List (CBL).
 *
 *	8255x is highly MII-compliant and all access to the PHY goes
 *	through the Management Data Interface (MDI).  Consequently, the
 *	driver leverages the mii.c library shared with other MII-compliant
 *	devices.
 *
 *	Big- and Little-Endian byte order as well as 32- and 64-bit
 *	archs are supported.  Weak-ordered memory and non-cache-coherent
 *	archs are supported.
 *
 *	III. Transmit
 *
 *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 *	together in a fixed-size ring (CBL) thus forming the flexible mode
 *	memory structure.  A TCB marked with the suspend-bit indicates
 *	the end of the ring.  The last TCB processed suspends the
 *	controller, and the controller can be restarted by issuing a CU
 *	resume command to continue from the suspend point, or a CU start
 *	command to start at a given position in the ring.
 *
 *	Non-Tx commands (config, multicast setup, etc) are linked
 *	into the CBL ring along with Tx commands.  The common structure
 *	used for both Tx and non-Tx commands is the Command Block (CB).
 *
 *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
 *	is the next CB to check for completion; cb_to_send is the first
 *	CB to start on in case of a previous failure to resume.  CB clean
 *	up happens in interrupt context in response to a CU interrupt.
 *	cbs_avail keeps track of the number of free CB resources available.
 *
 *	Hardware padding of short packets to the minimum packet size is
 *	enabled.  82557 pads with 7Eh, while the later controllers pad
 *	with 00h.
 *
 *	IV.  Receive
 *
 *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
 *	Descriptors (RFD) + data buffer, thus forming the simplified mode
 *	memory structure.  Rx skbs are allocated to contain both the RFD
 *	and the data buffer, but the RFD is pulled off before the skb is
 *	indicated.  The data buffer is aligned such that encapsulated
 *	protocol headers are u32-aligned.  Since the RFD is part of the
 *	mapped shared memory, and completion status is contained within
 *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
 *	view from software and hardware.
 *
 *	In order to keep updates to the RFD link field from colliding with
 *	hardware writes to mark packets complete, we use the feature that
 *	hardware will not write to a size 0 descriptor and mark the previous
 *	packet as end-of-list (EL).  After updating the link, we remove EL
 *	and only then restore the size such that hardware may use the
 *	previous-to-end RFD.
 *
 *	Under typical operation, the receive unit (RU) is started once,
 *	and the controller happily fills RFDs as frames arrive.  If
 *	replacement RFDs cannot be allocated, or the RU goes non-active,
 *	the RU must be restarted.  Frame arrival generates an interrupt,
 *	and Rx indication and re-allocation happen in the same context,
 *	therefore no locking is required.  A software-generated interrupt
 *	is generated from the watchdog to recover from a failed allocation
 *	scenario where all Rx resources have been indicated and none
 *	replaced.
 *
 *	V.   Miscellaneous
 *
 *	VLAN offloading of tagging, stripping and filtering is not
 *	supported, but the driver will accommodate the extra 4-byte VLAN tag
 *	for processing by upper layers.  Tx/Rx Checksum offloading is not
 *	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
 *	not supported (hardware limitation).
 *
 *	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 *	Thanks to JC (jchapman@katalix.com) for helping with
 *	testing/troubleshooting the development driver.
 *
 *	TODO:
 *	o several entry points race with dev->close
 *	o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 *	FIXES:
 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 *	- Stratus87247: protect MDI control register manipulations
 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
 *	- add clean lowlevel I/O emulation for cards with MII-lacking PHYs
 */
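
/*
 * A quick sketch (not from the 8255x manual) of the CBL bookkeeping
 * described above; see e100_exec_cb() for the real logic:
 *
 *	cb_to_clean --> [CB][CB][CB][CB] <-- cb_to_use
 *	                     ^
 *	                     cb_to_send (restart point after a failed resume)
 *
 * cbs_avail counts the free CBs between cb_to_use and cb_to_clean.
 */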

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <asm/unaligned.h>


#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
#define PFX			DRV_NAME ": "

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__func__ , ## args))

#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
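/* The trailing "ich" argument ends up in pci_device_id.driver_data; a
 * non-zero value marks ICH-chipset variants that need the ICH-specific
 * handling (see the ich flag and ich_10h_workaround below). */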
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_no_res = 0x08,
	rus_ready  = 0x10,
	rus_mask   = 0x3C,
};

enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING   = 1,
	RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};

struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
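/* X() lets struct config below be written once for both bitfield
 * orders: on __BIG_ENDIAN_BITFIELD machines each pair of fields is
 * declared in swapped order, keeping the layout of the configure
 * command bytes identical on all architectures. */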
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;
	u16 eeprom_wc;
	__le16 eeprom[256];
	spinlock_t mdio_lock;
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for (j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for (i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
};

/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for (i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if (!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
};

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

#define E100_WAIT_SCB_TIMEOUT	20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST	20    /* delay like the old code */
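/* e100_exec_cmd() below spins with cpu_relax() for the first
 * E100_WAIT_SCB_FAST iterations, then backs off with udelay(5) per
 * iteration, so the 20000-iteration limit above matches the ~100ms
 * worst case noted in the comment. */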
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}

static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				//request a reset
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}

static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	struct nic *nic = netdev_priv(netdev);

	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
}

/* the standard mdio_ctrl() function for usual MII-compliant hardware */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;


	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		printk("e100.mdio_ctrl(%s) won't go Ready\n",
			nic->netdev->name );
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	DPRINTK(HW, DEBUG,
		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
	return (u16)data_out;
}

/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
				 u32 addr,
				 u32 dir,
				 u32 reg,
				 u16 data)
{
	if ((reg == MII_BMCR) && (dir == mdi_write)) {
		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
							MII_ADVERTISE);

			/*
			 * Workaround Si issue where sometimes the part will not
			 * autoneg to 100Mbps even when advertised.
			 */
			if (advert & ADVERTISE_100FULL)
				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
			else if (advert & ADVERTISE_100HALF)
				data |= BMCR_SPEED100;
		}
	}
	return mdio_ctrl_hw(nic, addr, dir, reg, data);
}

/* Fully software-emulated mdio_ctrl() function for cards without
 * MII-compliant PHYs.
 * For now, this is mainly geared towards 80c24 support; in case of further
 * requirements for other types (i82503, ...?) either extend this mechanism
 * or split it, whichever is cleaner.
 */
static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
				      u32 addr,
				      u32 dir,
				      u32 reg,
				      u16 data)
{
	/* might need to allocate a netdev_priv'ed register array eventually
	 * to be able to record state changes, but for now
	 * some fully hardcoded register handling ought to be ok I guess. */

	if (dir == mdi_read) {
		switch (reg) {
		case MII_BMCR:
			/* Auto-negotiation, right? */
			return  BMCR_ANENABLE |
				BMCR_FULLDPLX;
		case MII_BMSR:
			return	BMSR_LSTATUS /* for mii_link_ok() */ |
				BMSR_ANEGCAPABLE |
				BMSR_10FULL;
		case MII_ADVERTISE:
			/* 80c24 is a "combo card" PHY, right? */
			return	ADVERTISE_10HALF |
				ADVERTISE_10FULL;
		default:
			DPRINTK(HW, DEBUG,
		"%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
		dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
			return 0xFFFF;
		}
	} else {
		switch (reg) {
		default:
			DPRINTK(HW, DEBUG,
		"%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
		dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
			return 0xFFFF;
		}
	}
}
static inline int e100_phy_supports_mii(struct nic *nic)
{
	/* for now, just check it by comparing whether we
	   are using MII software emulation.
	*/
	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
}

static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */

/* Initialize firmware */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw;
	u8 timer, bundle, min_size;
	int err;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision */
	if (nic->mac == mac_82559_D101M)
		fw_name = FIRMWARE_D101M;
	else if (nic->mac == mac_82559_D101S)
		fw_name = FIRMWARE_D101S;
	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
		fw_name = FIRMWARE_D102E;
	else /* No ucode on other devices */
		return NULL;

	err = request_firmware(&fw, fw_name, &nic->pdev->dev);
	if (err) {
		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
			fw_name, err);
		return ERR_PTR(err);
	}
	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
			fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		DPRINTK(PROBE, ERR,
			"\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}
	/* OK, firmware is validated and ready to use... */
	return fw;
}

static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode */
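	/* Each tunable is the 16-bit literal of a "move immediate"
	 * instruction (see the CPUSaver notes above), so only the low
	 * word is patched and the opcode in the high word is kept. */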
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
}

static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		DPRINTK(PROBE,ERR, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}

static int e100_phy_check_without_mii(struct nic *nic)
{
	u8 phy_type;
	int without_mii;

	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;

	switch (phy_type) {
	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
	case I82503: /* Non-MII PHY; UNTESTED! */
	case S80C24: /* Non-MII PHY; tested and working */
		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
		 * doesn't have a programming interface of any sort.  The
		 * media is sensed automatically based on how the link partner
		 * is configured.  This is, in essence, manual configuration.
		 */
		DPRINTK(PROBE, INFO,
			 "found MII-less i82503 or 80c24 or other PHY\n");

		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */

		/* these might be needed for certain MII-less cards...
		 * nic->flags |= ich;
		 * nic->flags |= ich_10h_workaround; */

		without_mii = 1;
		break;
	default:
		without_mii = 0;
		break;
	}
	return without_mii;
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			DPRINTK(HW, ERR,
				"Failed to locate any known PHY, aborting.\n");
			return -EAGAIN;
		}
	} else
		DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);

	/* Isolate all the PHY ids */
	for (addr = 0; addr < 32; addr++)
		mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
	/* Select the discovered PHY */
	bmcr &= ~BMCR_ISOLATE;
	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}

static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	DPRINTK(HW, ERR, "e100_hw_init\n");
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct dev_mc_list *list = netdev->mc_list;
	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	for (i = 0; list && i < count; i++, list = list->next)
		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
			ETH_ALEN);
}

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
		netdev->mc_count, netdev->flags);

	if (netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if (netdev->flags & IFF_ALLMULTI ||
		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}

static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
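	/* Successive MAC generations append counters to the stats dump
	 * area; the dump-complete marker lands just past the last counter
	 * the hardware supports, so pick its location per generation. */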
aaf918ba
AV
1551 __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
1552 (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
1da177e4
LT
1553 &s->complete;
1554
1555 /* Device's stats reporting may take several microseconds to
0a0863af 1556 * complete, so we're always waiting for results of the
1da177e4
LT
1557 * previous command. */
1558
f26251eb 1559 if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
1da177e4
LT
1560 *complete = 0;
1561 nic->tx_frames = le32_to_cpu(s->tx_good_frames);
1562 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
1563 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
1564 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
1565 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
1566 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
1567 ns->collisions += nic->tx_collisions;
1568 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1569 le32_to_cpu(s->tx_lost_crs);
1da177e4
LT
1570 ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
1571 nic->rx_over_length_errors;
1572 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1573 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1574 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1575 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
ecf7130b 1576 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
1da177e4
LT
1577 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1578 le32_to_cpu(s->rx_alignment_errors) +
1579 le32_to_cpu(s->rx_short_frame_errors) +
1580 le32_to_cpu(s->rx_cdt_errors);
1581 nic->tx_deferred += le32_to_cpu(s->tx_deferred);
1582 nic->tx_single_collisions +=
1583 le32_to_cpu(s->tx_single_collisions);
1584 nic->tx_multiple_collisions +=
1585 le32_to_cpu(s->tx_multiple_collisions);
f26251eb 1586 if (nic->mac >= mac_82558_D101_A4) {
1da177e4
LT
1587 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
1588 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
1589 nic->rx_fc_unsupported +=
1590 le32_to_cpu(s->fc_rcv_unsupported);
f26251eb 1591 if (nic->mac >= mac_82559_D101M) {
1da177e4
LT
1592 nic->tx_tco_frames +=
1593 le16_to_cpu(s->xmt_tco_frames);
1594 nic->rx_tco_frames +=
1595 le16_to_cpu(s->rcv_tco_frames);
1596 }
1597 }
1598 }
1599
05479938 1600
f26251eb 1601 if (e100_exec_cmd(nic, cuc_dump_reset, 0))
1f53367d 1602 DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
1603}
1604
1605static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1606{
1607 /* Adjust inter-frame-spacing (IFS) between two transmits if
1608 * we're getting collisions on a half-duplex connection. */
1609
f26251eb 1610 if (duplex == DUPLEX_HALF) {
1611 u32 prev = nic->adaptive_ifs;
1612 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
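 /* Back off when more than one frame in 32 collides (and the sample is
 * large enough to matter): widen the IFS in steps of 5 up to 60, and
 * tighten it again once traffic drops below min_frames. */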
1613
f26251eb 1614 if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1da177e4 1615 (nic->tx_frames > min_frames)) {
f26251eb 1616 if (nic->adaptive_ifs < 60)
1617 nic->adaptive_ifs += 5;
1618 } else if (nic->tx_frames < min_frames) {
f26251eb 1619 if (nic->adaptive_ifs >= 5)
1620 nic->adaptive_ifs -= 5;
1621 }
f26251eb 1622 if (nic->adaptive_ifs != prev)
1623 e100_exec_cb(nic, NULL, e100_configure);
1624 }
1625}
1626
1627static void e100_watchdog(unsigned long data)
1628{
1629 struct nic *nic = (struct nic *)data;
1630 struct ethtool_cmd cmd;
1631
1632 DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
1633
1634 /* mii library handles link maintenance tasks */
1635
1636 mii_ethtool_gset(&nic->mii, &cmd);
1637
f26251eb 1638 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1639 printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
1640 nic->netdev->name,
1641 cmd.speed == SPEED_100 ? "100" : "10",
1642 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
f26251eb 1643 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1644 printk(KERN_INFO "e100: %s NIC Link is Down\n",
1645 nic->netdev->name);
1646 }
1647
1648 mii_check_link(&nic->mii);
1649
1650 /* Software generated interrupt to recover from (rare) Rx
1651 * allocation failure.
1652 * Unfortunately have to use a spinlock to not re-enable interrupts
1653 * accidentally, due to hardware that shares a register between the
1654 * interrupt mask bit and the SW Interrupt generation bit */
1da177e4 1655 spin_lock_irq(&nic->cmd_lock);
27345bb6 1656 iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi);
1da177e4 1657 e100_write_flush(nic);
ad8c48ad 1658 spin_unlock_irq(&nic->cmd_lock);
1659
1660 e100_update_stats(nic);
1661 e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
1662
f26251eb 1663 if (nic->mac <= mac_82557_D100_C)
1664 /* Issue a multicast command to workaround a 557 lock up */
1665 e100_set_multicast_list(nic->netdev);
1666
f26251eb 1667 if (nic->flags & ich && cmd.speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
1668 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1669 nic->flags |= ich_10h_workaround;
1670 else
1671 nic->flags &= ~ich_10h_workaround;
1672
1673 mod_timer(&nic->watchdog,
1674 round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
1675}
1676
858119e1 1677static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1678 struct sk_buff *skb)
1679{
1680 cb->command = nic->tx_command;
962082b6 1681 /* interrupt every 16 packets regardless of delay */
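 /* (cbs_avail & ~15) == cbs_avail holds exactly when the low four bits
 * of cbs_avail are zero, i.e. on every 16th CB. */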
f26251eb 1682 if ((nic->cbs_avail & ~15) == nic->cbs_avail)
996ec353 1683 cb->command |= cpu_to_le16(cb_i);
1684 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1685 cb->u.tcb.tcb_byte_count = 0;
1686 cb->u.tcb.threshold = nic->tx_threshold;
1687 cb->u.tcb.tbd_count = 1;
1688 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1689 skb->data, skb->len, PCI_DMA_TODEVICE));
611494dc 1690 /* TODO: check the mapping with pci_dma_mapping_error(); a failed map would hand the hardware a bogus buffer address */
1691 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1692}
1693
1694static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1695 struct net_device *netdev)
1696{
1697 struct nic *nic = netdev_priv(netdev);
1698 int err;
1699
f26251eb 1700 if (nic->flags & ich_10h_workaround) {
1701 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1702 Issue a NOP command followed by a 1us delay before
1703 issuing the Tx command. */
f26251eb 1704 if (e100_exec_cmd(nic, cuc_nop, 0))
1f53367d 1705 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
1706 udelay(1);
1707 }
1708
1709 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1710
f26251eb 1711 switch (err) {
1712 case -ENOSPC:
1713 /* We queued the skb, but now we're out of space. */
1714 DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
1715 netif_stop_queue(netdev);
1716 break;
1717 case -ENOMEM:
1718 /* This is a hard error - log it. */
1719 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
1720 netif_stop_queue(netdev);
5b548140 1721 return NETDEV_TX_BUSY;
1722 }
1723
1724 netdev->trans_start = jiffies;
6ed10654 1725 return NETDEV_TX_OK;
1726}
1727
858119e1 1728static int e100_tx_clean(struct nic *nic)
1da177e4 1729{
09f75cd7 1730 struct net_device *dev = nic->netdev;
1731 struct cb *cb;
1732 int tx_cleaned = 0;
1733
1734 spin_lock(&nic->cb_lock);
1735
1da177e4 1736 /* Clean CBs marked complete */
f26251eb 1737 for (cb = nic->cb_to_clean;
1738 cb->status & cpu_to_le16(cb_complete);
1739 cb = nic->cb_to_clean = cb->next) {
1740 DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
1741 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
1742 cb->status);
1743
f26251eb 1744 if (likely(cb->skb != NULL)) {
1745 dev->stats.tx_packets++;
1746 dev->stats.tx_bytes += cb->skb->len;
1747
1748 pci_unmap_single(nic->pdev,
1749 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1750 le16_to_cpu(cb->u.tcb.tbd.size),
1751 PCI_DMA_TODEVICE);
1752 dev_kfree_skb_any(cb->skb);
1753 cb->skb = NULL;
1754 tx_cleaned = 1;
1755 }
1756 cb->status = 0;
1757 nic->cbs_avail++;
1758 }
1759
1760 spin_unlock(&nic->cb_lock);
1761
1762 /* Recover from running out of Tx resources in xmit_frame */
f26251eb 1763 if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1764 netif_wake_queue(nic->netdev);
1765
1766 return tx_cleaned;
1767}
1768
1769static void e100_clean_cbs(struct nic *nic)
1770{
1771 if (nic->cbs) {
1772 while (nic->cbs_avail != nic->params.cbs.count) {
1da177e4 1773 struct cb *cb = nic->cb_to_clean;
f26251eb 1774 if (cb->skb) {
1775 pci_unmap_single(nic->pdev,
1776 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1777 le16_to_cpu(cb->u.tcb.tbd.size),
1778 PCI_DMA_TODEVICE);
1779 dev_kfree_skb(cb->skb);
1780 }
1781 nic->cb_to_clean = nic->cb_to_clean->next;
1782 nic->cbs_avail++;
1783 }
1784 pci_free_consistent(nic->pdev,
1785 sizeof(struct cb) * nic->params.cbs.count,
1786 nic->cbs, nic->cbs_dma_addr);
1787 nic->cbs = NULL;
1788 nic->cbs_avail = 0;
1789 }
1790 nic->cuc_cmd = cuc_start;
1791 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1792 nic->cbs;
1793}
1794
1795static int e100_alloc_cbs(struct nic *nic)
1796{
1797 struct cb *cb;
1798 unsigned int i, count = nic->params.cbs.count;
1799
1800 nic->cuc_cmd = cuc_start;
1801 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1802 nic->cbs_avail = 0;
1803
1804 nic->cbs = pci_alloc_consistent(nic->pdev,
1805 sizeof(struct cb) * count, &nic->cbs_dma_addr);
f26251eb 1806 if (!nic->cbs)
1807 return -ENOMEM;
1808
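 /* Link the CBs into a ring: software walks the next/prev pointers,
 * while cb->link gives the hardware the bus address of the next CB. */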
f26251eb 1809 for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
1810 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1811 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1812
1813 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1814 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1815 ((i+1) % count) * sizeof(struct cb));
1816 cb->skb = NULL;
1817 }
1818
1819 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1820 nic->cbs_avail = count;
1821
1822 return 0;
1823}
1824
ca93ca42 1825static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1da177e4 1826{
1827 if (!nic->rxs) return;
1828 if (RU_SUSPENDED != nic->ru_running) return;
1829
1830 /* handle init time starts */
f26251eb 1831 if (!rx) rx = nic->rxs;
1832
1833 /* (Re)start RU if suspended or idle and RFA is non-NULL */
f26251eb 1834 if (rx->skb) {
1835 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1836 nic->ru_running = RU_RUNNING;
1837 }
1838}
1839
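/* Each Rx buffer holds the RFD header plus a maximum-size VLAN-tagged
 * Ethernet frame. */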
1840#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
858119e1 1841static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1da177e4 1842{
f26251eb 1843 if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
1844 return -ENOMEM;
1845
1846 /* Align, init, and map the RFD. */
1da177e4 1847 skb_reserve(rx->skb, NET_IP_ALIGN);
27d7ff46 1848 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1849 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1850 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1851
8d8bb39b 1852 if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
1f53367d 1853 dev_kfree_skb_any(rx->skb);
097688ef 1854 rx->skb = NULL;
1855 rx->dma_addr = 0;
1856 return -ENOMEM;
1857 }
1858
1da177e4 1859 /* Link the RFD to end of RFA by linking previous RFD to
1860 * this one. We are safe to touch the previous RFD because
1861 * it is protected by the before last buffer's el bit being set */
aaf918ba 1862 if (rx->prev->skb) {
1da177e4 1863 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
6caf52a4 1864 put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1923815d 1865 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
773c9c1f 1866 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1867 }
1868
1869 return 0;
1870}
1871
858119e1 1872static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1873 unsigned int *work_done, unsigned int work_to_do)
1874{
09f75cd7 1875 struct net_device *dev = nic->netdev;
1876 struct sk_buff *skb = rx->skb;
1877 struct rfd *rfd = (struct rfd *)skb->data;
1878 u16 rfd_status, actual_size;
1879
f26251eb 1880 if (unlikely(work_done && *work_done >= work_to_do))
1881 return -EAGAIN;
1882
1883 /* Need to sync before taking a peek at cb_complete bit */
1884 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
773c9c1f 1885 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1886 rfd_status = le16_to_cpu(rfd->status);
1887
1888 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
1889
1890 /* If data isn't ready, nothing to indicate */
1891 if (unlikely(!(rfd_status & cb_complete))) {
1892 /* If the next buffer has the el bit, but we think the receiver
1893 * is still running, check to see if it really stopped while
1894 * we had interrupts off.
1895 * This allows for a fast restart without re-enabling
1896 * interrupts */
1897 if ((le16_to_cpu(rfd->command) & cb_el) &&
1898 (RU_RUNNING == nic->ru_running))
1899
17393dd6 1900 if (ioread8(&nic->csr->scb.status) & rus_no_res)
7734f6e6 1901 nic->ru_running = RU_SUSPENDED;
1902 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
1903 sizeof(struct rfd),
6ff9c2e7 1904 PCI_DMA_FROMDEVICE);
1f53367d 1905 return -ENODATA;
7734f6e6 1906 }
1907
1908 /* Get actual data size */
1909 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
f26251eb 1910 if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
1911 actual_size = RFD_BUF_LEN - sizeof(struct rfd);
1912
1913 /* Get data */
1914 pci_unmap_single(nic->pdev, rx->dma_addr,
773c9c1f 1915 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1da177e4 1916
1917 /* If this buffer has the el bit, but we think the receiver
1918 * is still running, check to see if it really stopped while
1919 * we had interrupts off.
1920 * This allows for a fast restart without re-enabling interrupts.
1921 * This can happen when the RU sees the size change but also sees
1922 * the el bit set. */
1923 if ((le16_to_cpu(rfd->command) & cb_el) &&
1924 (RU_RUNNING == nic->ru_running)) {
1925
17393dd6 1926 if (ioread8(&nic->csr->scb.status) & rus_no_res)
ca93ca42 1927 nic->ru_running = RU_SUSPENDED;
7734f6e6 1928 }
ca93ca42 1929
1930 /* Pull off the RFD and put the actual data (minus eth hdr) */
1931 skb_reserve(skb, sizeof(struct rfd));
1932 skb_put(skb, actual_size);
1933 skb->protocol = eth_type_trans(skb, nic->netdev);
1934
f26251eb 1935 if (unlikely(!(rfd_status & cb_ok))) {
1da177e4 1936 /* Don't indicate if hardware indicates errors */
1da177e4 1937 dev_kfree_skb_any(skb);
f26251eb 1938 } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
1939 /* Don't indicate oversized frames */
1940 nic->rx_over_length_errors++;
1941 dev_kfree_skb_any(skb);
1942 } else {
1943 dev->stats.rx_packets++;
1944 dev->stats.rx_bytes += actual_size;
1da177e4 1945 netif_receive_skb(skb);
f26251eb 1946 if (work_done)
1947 (*work_done)++;
1948 }
1949
1950 rx->skb = NULL;
1951
1952 return 0;
1953}
1954
858119e1 1955static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1956 unsigned int work_to_do)
1957{
1958 struct rx *rx;
1959 int restart_required = 0, err = 0;
1960 struct rx *old_before_last_rx, *new_before_last_rx;
1961 struct rfd *old_before_last_rfd, *new_before_last_rfd;
1962
1963 /* Indicate newly arrived packets */
f26251eb 1964 for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
1965 err = e100_rx_indicate(nic, rx, work_done, work_to_do);
1966 /* Hit quota or no more to clean */
1967 if (-EAGAIN == err || -ENODATA == err)
ca93ca42 1968 break;
1969 }
1970
1971
1972 /* On EAGAIN, hit quota so have more work to do, restart once
1973 * cleanup is complete.
 1975 * Otherwise, if the RU is already RNR, a restart is also required: this ensures that
1975 * the state machine progression never allows a start with a
1976 * partially cleaned list, avoiding a race between hardware
1977 * and rx_to_clean when in NAPI mode */
1978 if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
1979 restart_required = 1;
1980
1981 old_before_last_rx = nic->rx_to_use->prev->prev;
1982 old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
ca93ca42 1983
1da177e4 1984 /* Alloc new skbs to refill list */
1985 for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
1986 if (unlikely(e100_rx_alloc_skb(nic, rx)))
1987 break; /* Better luck next time (see watchdog) */
1988 }
ca93ca42 1989
1990 new_before_last_rx = nic->rx_to_use->prev->prev;
1991 if (new_before_last_rx != old_before_last_rx) {
1992 /* Set the el-bit on the buffer that is before the last buffer.
1993 * This lets us update the next pointer on the last buffer
1994 * without worrying about hardware touching it.
1995 * We set the size to 0 to prevent hardware from touching this
1996 * buffer.
1997 * When the hardware hits the before last buffer with el-bit
1998 * and size of 0, it will RNR interrupt, the RUS will go into
1999 * the No Resources state. It will not complete nor write to
2000 * this buffer. */
2001 new_before_last_rfd =
2002 (struct rfd *)new_before_last_rx->skb->data;
2003 new_before_last_rfd->size = 0;
2004 new_before_last_rfd->command |= cpu_to_le16(cb_el);
2005 pci_dma_sync_single_for_device(nic->pdev,
2006 new_before_last_rx->dma_addr, sizeof(struct rfd),
773c9c1f 2007 PCI_DMA_BIDIRECTIONAL);
2008
2009 /* Now that we have a new stopping point, we can clear the old
2010 * stopping point. We must sync twice to get the proper
2011 * ordering on the hardware side of things. */
2012 old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
2013 pci_dma_sync_single_for_device(nic->pdev,
2014 old_before_last_rx->dma_addr, sizeof(struct rfd),
773c9c1f 2015 PCI_DMA_BIDIRECTIONAL);
2016 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
2017 pci_dma_sync_single_for_device(nic->pdev,
2018 old_before_last_rx->dma_addr, sizeof(struct rfd),
773c9c1f 2019 PCI_DMA_BIDIRECTIONAL);
2020 }
2021
f26251eb 2022 if (restart_required) {
ca93ca42 2023 /* Ack the pending RNR before restarting the RU */
915e91d7 2024 iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
7734f6e6 2025 e100_start_receiver(nic, nic->rx_to_clean);
f26251eb 2026 if (work_done)
2027 (*work_done)++;
2028 }
2029}
2030
2031static void e100_rx_clean_list(struct nic *nic)
2032{
2033 struct rx *rx;
2034 unsigned int i, count = nic->params.rfds.count;
2035
2036 nic->ru_running = RU_UNINITIALIZED;
2037
2038 if (nic->rxs) {
2039 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2040 if (rx->skb) {
1da177e4 2041 pci_unmap_single(nic->pdev, rx->dma_addr,
773c9c1f 2042 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2043 dev_kfree_skb(rx->skb);
2044 }
2045 }
2046 kfree(nic->rxs);
2047 nic->rxs = NULL;
2048 }
2049
2050 nic->rx_to_use = nic->rx_to_clean = NULL;
2051}
2052
2053static int e100_rx_alloc_list(struct nic *nic)
2054{
2055 struct rx *rx;
2056 unsigned int i, count = nic->params.rfds.count;
7734f6e6 2057 struct rfd *before_last;
2058
2059 nic->rx_to_use = nic->rx_to_clean = NULL;
ca93ca42 2060 nic->ru_running = RU_UNINITIALIZED;
1da177e4 2061
f26251eb 2062 if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
1da177e4 2063 return -ENOMEM;
1da177e4 2064
f26251eb 2065 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2066 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
2067 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
f26251eb 2068 if (e100_rx_alloc_skb(nic, rx)) {
2069 e100_rx_clean_list(nic);
2070 return -ENOMEM;
2071 }
2072 }
2073 /* Set the el-bit on the buffer that is before the last buffer.
2074 * This lets us update the next pointer on the last buffer without
2075 * worrying about hardware touching it.
2076 * We set the size to 0 to prevent hardware from touching this buffer.
2077 * When the hardware hits the before last buffer with el-bit and size
2078 * of 0, it will RNR interrupt, the RU will go into the No Resources
2079 * state. It will not complete nor write to this buffer. */
2080 rx = nic->rxs->prev->prev;
2081 before_last = (struct rfd *)rx->skb->data;
2082 before_last->command |= cpu_to_le16(cb_el);
2083 before_last->size = 0;
2084 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
773c9c1f 2085 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
2086
2087 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
ca93ca42 2088 nic->ru_running = RU_SUSPENDED;
2089
2090 return 0;
2091}
2092
7d12e780 2093static irqreturn_t e100_intr(int irq, void *dev_id)
2094{
2095 struct net_device *netdev = dev_id;
2096 struct nic *nic = netdev_priv(netdev);
27345bb6 2097 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
2098
2099 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
2100
f26251eb 2101 if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
2102 stat_ack == stat_ack_not_present) /* Hardware is ejected */
2103 return IRQ_NONE;
2104
2105 /* Ack interrupt(s) */
27345bb6 2106 iowrite8(stat_ack, &nic->csr->scb.stat_ack);
1da177e4 2107
ca93ca42 2108 /* We hit Receive No Resource (RNR); restart RU after cleaning */
f26251eb 2109 if (stat_ack & stat_ack_rnr)
2110 nic->ru_running = RU_SUSPENDED;
2111
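 /* Mask the IRQ before scheduling NAPI; e100_poll() re-enables it once
 * the budget is no longer exhausted. */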
288379f0 2112 if (likely(napi_schedule_prep(&nic->napi))) {
0685c31b 2113 e100_disable_irq(nic);
288379f0 2114 __napi_schedule(&nic->napi);
0685c31b 2115 }
2116
2117 return IRQ_HANDLED;
2118}
2119
bea3348e 2120static int e100_poll(struct napi_struct *napi, int budget)
1da177e4 2121{
bea3348e 2122 struct nic *nic = container_of(napi, struct nic, napi);
ddfce6bb 2123 unsigned int work_done = 0;
1da177e4 2124
bea3348e 2125 e100_rx_clean(nic, &work_done, budget);
53e52c72 2126 e100_tx_clean(nic);
1da177e4 2127
2128 /* If budget not fully consumed, exit the polling mode */
2129 if (work_done < budget) {
288379f0 2130 napi_complete(napi);
1da177e4 2131 e100_enable_irq(nic);
2132 }
2133
bea3348e 2134 return work_done;
2135}
2136
2137#ifdef CONFIG_NET_POLL_CONTROLLER
2138static void e100_netpoll(struct net_device *netdev)
2139{
2140 struct nic *nic = netdev_priv(netdev);
611494dc 2141
1da177e4 2142 e100_disable_irq(nic);
7d12e780 2143 e100_intr(nic->pdev->irq, netdev);
2144 e100_tx_clean(nic);
2145 e100_enable_irq(nic);
2146}
2147#endif
2148
2149static int e100_set_mac_address(struct net_device *netdev, void *p)
2150{
2151 struct nic *nic = netdev_priv(netdev);
2152 struct sockaddr *addr = p;
2153
2154 if (!is_valid_ether_addr(addr->sa_data))
2155 return -EADDRNOTAVAIL;
2156
2157 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2158 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2159
2160 return 0;
2161}
2162
2163static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2164{
f26251eb 2165 if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2166 return -EINVAL;
2167 netdev->mtu = new_mtu;
2168 return 0;
2169}
2170
2171static int e100_asf(struct nic *nic)
2172{
2173 /* ASF can be enabled from eeprom */
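 /* Specifically: PCI device IDs 0x1050-0x1057 with the EEPROM ASF flag
 * set, the GCL flag clear, and an SMBus address configured (0xFE
 * apparently meaning none). */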
 2174 return ((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2175 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2176 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2177 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
2178}
2179
2180static int e100_up(struct nic *nic)
2181{
2182 int err;
2183
f26251eb 2184 if ((err = e100_rx_alloc_list(nic)))
1da177e4 2185 return err;
f26251eb 2186 if ((err = e100_alloc_cbs(nic)))
1da177e4 2187 goto err_rx_clean_list;
f26251eb 2188 if ((err = e100_hw_init(nic)))
2189 goto err_clean_cbs;
2190 e100_set_multicast_list(nic->netdev);
ca93ca42 2191 e100_start_receiver(nic, NULL);
1da177e4 2192 mod_timer(&nic->watchdog, jiffies);
f26251eb 2193 if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
2194 nic->netdev->name, nic->netdev)))
2195 goto err_no_irq;
1da177e4 2196 netif_wake_queue(nic->netdev);
bea3348e 2197 napi_enable(&nic->napi);
2198 /* enable ints _after_ enabling poll, preventing a race between
2199 * disable ints+schedule */
2200 e100_enable_irq(nic);
2201 return 0;
2202
2203err_no_irq:
2204 del_timer_sync(&nic->watchdog);
2205err_clean_cbs:
2206 e100_clean_cbs(nic);
2207err_rx_clean_list:
2208 e100_rx_clean_list(nic);
2209 return err;
2210}
2211
2212static void e100_down(struct nic *nic)
2213{
0236ebb7 2214 /* wait here for poll to complete */
bea3348e 2215 napi_disable(&nic->napi);
0236ebb7 2216 netif_stop_queue(nic->netdev);
2217 e100_hw_reset(nic);
2218 free_irq(nic->pdev->irq, nic->netdev);
2219 del_timer_sync(&nic->watchdog);
2220 netif_carrier_off(nic->netdev);
2221 e100_clean_cbs(nic);
2222 e100_rx_clean_list(nic);
2223}
2224
2225static void e100_tx_timeout(struct net_device *netdev)
2226{
2227 struct nic *nic = netdev_priv(netdev);
2228
05479938 2229 /* Reset outside of interrupt context, to avoid request_irq
2230 * in interrupt context */
2231 schedule_work(&nic->tx_timeout_task);
2232}
2233
c4028958 2234static void e100_tx_timeout_task(struct work_struct *work)
2acdb1e0 2235{
2236 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2237 struct net_device *netdev = nic->netdev;
2acdb1e0 2238
1da177e4 2239 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
27345bb6 2240 ioread8(&nic->csr->scb.status));
2241 e100_down(netdev_priv(netdev));
2242 e100_up(netdev_priv(netdev));
2243}
2244
2245static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2246{
2247 int err;
2248 struct sk_buff *skb;
2249
2250 /* Use driver resources to perform internal MAC or PHY
2251 * loopback test. A single packet is prepared and transmitted
2252 * in loopback mode, and the test passes if the received
2253 * packet compares byte-for-byte to the transmitted packet. */
2254
f26251eb 2255 if ((err = e100_rx_alloc_list(nic)))
1da177e4 2256 return err;
f26251eb 2257 if ((err = e100_alloc_cbs(nic)))
2258 goto err_clean_rx;
2259
2260 /* ICH PHY loopback is broken so do MAC loopback instead */
f26251eb 2261 if (nic->flags & ich && loopback_mode == lb_phy)
2262 loopback_mode = lb_mac;
2263
2264 nic->loopback = loopback_mode;
f26251eb 2265 if ((err = e100_hw_init(nic)))
2266 goto err_loopback_none;
2267
f26251eb 2268 if (loopback_mode == lb_phy)
2269 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2270 BMCR_LOOPBACK);
2271
ca93ca42 2272 e100_start_receiver(nic, NULL);
1da177e4 2273
f26251eb 2274 if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
2275 err = -ENOMEM;
2276 goto err_loopback_none;
2277 }
2278 skb_put(skb, ETH_DATA_LEN);
2279 memset(skb->data, 0xFF, ETH_DATA_LEN);
2280 e100_xmit_frame(skb, nic->netdev);
2281
2282 msleep(10);
2283
aa49cdd9 2284 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
773c9c1f 2285 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
aa49cdd9 2286
f26251eb 2287 if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2288 skb->data, ETH_DATA_LEN))
2289 err = -EAGAIN;
2290
2291err_loopback_none:
2292 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2293 nic->loopback = lb_none;
1da177e4 2294 e100_clean_cbs(nic);
aa49cdd9 2295 e100_hw_reset(nic);
2296err_clean_rx:
2297 e100_rx_clean_list(nic);
2298 return err;
2299}
2300
2301#define MII_LED_CONTROL 0x1B
2302#define E100_82552_LED_OVERRIDE 0x19
2303#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
2304#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
2305static void e100_blink_led(unsigned long data)
2306{
2307 struct nic *nic = (struct nic *)data;
2308 enum led_state {
2309 led_on = 0x01,
2310 led_off = 0x04,
2311 led_on_559 = 0x05,
2312 led_on_557 = 0x07,
2313 };
2314 u16 led_reg = MII_LED_CONTROL;
2315
2316 if (nic->phy == phy_82552_v) {
2317 led_reg = E100_82552_LED_OVERRIDE;
1da177e4 2318
2319 nic->leds = (nic->leds == E100_82552_LED_ON) ?
2320 E100_82552_LED_OFF : E100_82552_LED_ON;
2321 } else {
2322 nic->leds = (nic->leds & led_on) ? led_off :
2323 (nic->mac < mac_82559_D101M) ? led_on_557 :
2324 led_on_559;
2325 }
2326 mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
2327 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
2328}
2329
2330static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2331{
2332 struct nic *nic = netdev_priv(netdev);
2333 return mii_ethtool_gset(&nic->mii, cmd);
2334}
2335
2336static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2337{
2338 struct nic *nic = netdev_priv(netdev);
2339 int err;
2340
2341 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2342 err = mii_ethtool_sset(&nic->mii, cmd);
2343 e100_exec_cb(nic, NULL, e100_configure);
2344
2345 return err;
2346}
2347
2348static void e100_get_drvinfo(struct net_device *netdev,
2349 struct ethtool_drvinfo *info)
2350{
2351 struct nic *nic = netdev_priv(netdev);
2352 strcpy(info->driver, DRV_NAME);
2353 strcpy(info->version, DRV_VERSION);
2354 strcpy(info->fw_version, "N/A");
2355 strcpy(info->bus_info, pci_name(nic->pdev));
2356}
2357
abf9b902 2358#define E100_PHY_REGS 0x1C
2359static int e100_get_regs_len(struct net_device *netdev)
2360{
2361 struct nic *nic = netdev_priv(netdev);
abf9b902 2362 return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
2363}
2364
2365static void e100_get_regs(struct net_device *netdev,
2366 struct ethtool_regs *regs, void *p)
2367{
2368 struct nic *nic = netdev_priv(netdev);
2369 u32 *buff = p;
2370 int i;
2371
44c10138 2372 regs->version = (1 << 24) | nic->pdev->revision;
2373 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2374 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2375 ioread16(&nic->csr->scb.status);
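 /* Dump layout: word 0 packs cmd_hi/cmd_lo/status; the PHY registers
 * 0x1C..0 follow, highest register first, then the device dump buffer. */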
f26251eb 2376 for (i = E100_PHY_REGS; i >= 0; i--)
2377 buff[1 + E100_PHY_REGS - i] =
2378 mdio_read(netdev, nic->mii.phy_id, i);
2379 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2380 e100_exec_cb(nic, NULL, e100_dump);
2381 msleep(10);
2382 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2383 sizeof(nic->mem->dump_buf));
2384}
2385
2386static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2387{
2388 struct nic *nic = netdev_priv(netdev);
2389 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2390 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2391}
2392
2393static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2394{
2395 struct nic *nic = netdev_priv(netdev);
2396
2397 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2398 !device_can_wakeup(&nic->pdev->dev))
2399 return -EOPNOTSUPP;
2400
f26251eb 2401 if (wol->wolopts)
2402 nic->flags |= wol_magic;
2403 else
2404 nic->flags &= ~wol_magic;
2405
2406 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2407
2408 e100_exec_cb(nic, NULL, e100_configure);
2409
2410 return 0;
2411}
2412
2413static u32 e100_get_msglevel(struct net_device *netdev)
2414{
2415 struct nic *nic = netdev_priv(netdev);
2416 return nic->msg_enable;
2417}
2418
2419static void e100_set_msglevel(struct net_device *netdev, u32 value)
2420{
2421 struct nic *nic = netdev_priv(netdev);
2422 nic->msg_enable = value;
2423}
2424
2425static int e100_nway_reset(struct net_device *netdev)
2426{
2427 struct nic *nic = netdev_priv(netdev);
2428 return mii_nway_restart(&nic->mii);
2429}
2430
2431static u32 e100_get_link(struct net_device *netdev)
2432{
2433 struct nic *nic = netdev_priv(netdev);
2434 return mii_link_ok(&nic->mii);
2435}
2436
2437static int e100_get_eeprom_len(struct net_device *netdev)
2438{
2439 struct nic *nic = netdev_priv(netdev);
2440 return nic->eeprom_wc << 1;
2441}
2442
2443#define E100_EEPROM_MAGIC 0x1234
2444static int e100_get_eeprom(struct net_device *netdev,
2445 struct ethtool_eeprom *eeprom, u8 *bytes)
2446{
2447 struct nic *nic = netdev_priv(netdev);
2448
2449 eeprom->magic = E100_EEPROM_MAGIC;
2450 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2451
2452 return 0;
2453}
2454
2455static int e100_set_eeprom(struct net_device *netdev,
2456 struct ethtool_eeprom *eeprom, u8 *bytes)
2457{
2458 struct nic *nic = netdev_priv(netdev);
2459
f26251eb 2460 if (eeprom->magic != E100_EEPROM_MAGIC)
2461 return -EINVAL;
2462
2463 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2464
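 /* ethtool offsets/lengths are in bytes but the EEPROM is word-addressed;
 * shift to words and round up so a partial trailing word is written. */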
2465 return e100_eeprom_save(nic, eeprom->offset >> 1,
2466 (eeprom->len >> 1) + 1);
2467}
2468
2469static void e100_get_ringparam(struct net_device *netdev,
2470 struct ethtool_ringparam *ring)
2471{
2472 struct nic *nic = netdev_priv(netdev);
2473 struct param_range *rfds = &nic->params.rfds;
2474 struct param_range *cbs = &nic->params.cbs;
2475
2476 ring->rx_max_pending = rfds->max;
2477 ring->tx_max_pending = cbs->max;
2478 ring->rx_mini_max_pending = 0;
2479 ring->rx_jumbo_max_pending = 0;
2480 ring->rx_pending = rfds->count;
2481 ring->tx_pending = cbs->count;
2482 ring->rx_mini_pending = 0;
2483 ring->rx_jumbo_pending = 0;
2484}
2485
2486static int e100_set_ringparam(struct net_device *netdev,
2487 struct ethtool_ringparam *ring)
2488{
2489 struct nic *nic = netdev_priv(netdev);
2490 struct param_range *rfds = &nic->params.rfds;
2491 struct param_range *cbs = &nic->params.cbs;
2492
05479938 2493 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2494 return -EINVAL;
2495
f26251eb 2496 if (netif_running(netdev))
2497 e100_down(nic);
2498 rfds->count = max(ring->rx_pending, rfds->min);
2499 rfds->count = min(rfds->count, rfds->max);
2500 cbs->count = max(ring->tx_pending, cbs->min);
2501 cbs->count = min(cbs->count, cbs->max);
2502 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
2503 rfds->count, cbs->count);
f26251eb 2504 if (netif_running(netdev))
2505 e100_up(nic);
2506
2507 return 0;
2508}
2509
2510static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2511 "Link test (on/offline)",
2512 "Eeprom test (on/offline)",
2513 "Self test (offline)",
2514 "Mac loopback (offline)",
2515 "Phy loopback (offline)",
2516};
4c3616cd 2517#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
1da177e4 2518
2519static void e100_diag_test(struct net_device *netdev,
2520 struct ethtool_test *test, u64 *data)
2521{
2522 struct ethtool_cmd cmd;
2523 struct nic *nic = netdev_priv(netdev);
2524 int i, err;
2525
2526 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2527 data[0] = !mii_link_ok(&nic->mii);
2528 data[1] = e100_eeprom_load(nic);
f26251eb 2529 if (test->flags & ETH_TEST_FL_OFFLINE) {
2530
2531 /* save speed, duplex & autoneg settings */
2532 err = mii_ethtool_gset(&nic->mii, &cmd);
2533
f26251eb 2534 if (netif_running(netdev))
2535 e100_down(nic);
2536 data[2] = e100_self_test(nic);
2537 data[3] = e100_loopback_test(nic, lb_mac);
2538 data[4] = e100_loopback_test(nic, lb_phy);
2539
2540 /* restore speed, duplex & autoneg settings */
2541 err = mii_ethtool_sset(&nic->mii, &cmd);
2542
f26251eb 2543 if (netif_running(netdev))
2544 e100_up(nic);
2545 }
f26251eb 2546 for (i = 0; i < E100_TEST_LEN; i++)
1da177e4 2547 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2548
2549 msleep_interruptible(4 * 1000);
2550}
2551
2552static int e100_phys_id(struct net_device *netdev, u32 data)
2553{
2554 struct nic *nic = netdev_priv(netdev);
2555 u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
2556 MII_LED_CONTROL;
1da177e4 2557
f26251eb 2558 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2559 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
2560 mod_timer(&nic->blink_timer, jiffies);
2561 msleep_interruptible(data * 1000);
2562 del_timer_sync(&nic->blink_timer);
b55de80e 2563 mdio_write(netdev, nic->mii.phy_id, led_reg, 0);
2564
2565 return 0;
2566}
2567
2568static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2569 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2570 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2571 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2572 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2573 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2574 "tx_heartbeat_errors", "tx_window_errors",
2575 /* device-specific stats */
2576 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2577 "tx_flow_control_pause", "rx_flow_control_pause",
2578 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2579};
2580#define E100_NET_STATS_LEN 21
4c3616cd 2581#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
1da177e4 2582
b9f2c044 2583static int e100_get_sset_count(struct net_device *netdev, int sset)
1da177e4 2584{
2585 switch (sset) {
2586 case ETH_SS_TEST:
2587 return E100_TEST_LEN;
2588 case ETH_SS_STATS:
2589 return E100_STATS_LEN;
2590 default:
2591 return -EOPNOTSUPP;
2592 }
2593}
2594
2595static void e100_get_ethtool_stats(struct net_device *netdev,
2596 struct ethtool_stats *stats, u64 *data)
2597{
2598 struct nic *nic = netdev_priv(netdev);
2599 int i;
2600
f26251eb 2601 for (i = 0; i < E100_NET_STATS_LEN; i++)
09f75cd7 2602 data[i] = ((unsigned long *)&netdev->stats)[i];
2603
2604 data[i++] = nic->tx_deferred;
2605 data[i++] = nic->tx_single_collisions;
2606 data[i++] = nic->tx_multiple_collisions;
2607 data[i++] = nic->tx_fc_pause;
2608 data[i++] = nic->rx_fc_pause;
2609 data[i++] = nic->rx_fc_unsupported;
2610 data[i++] = nic->tx_tco_frames;
2611 data[i++] = nic->rx_tco_frames;
2612}
2613
2614static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2615{
f26251eb 2616 switch (stringset) {
2617 case ETH_SS_TEST:
2618 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2619 break;
2620 case ETH_SS_STATS:
2621 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2622 break;
2623 }
2624}
2625
7282d491 2626static const struct ethtool_ops e100_ethtool_ops = {
2627 .get_settings = e100_get_settings,
2628 .set_settings = e100_set_settings,
2629 .get_drvinfo = e100_get_drvinfo,
2630 .get_regs_len = e100_get_regs_len,
2631 .get_regs = e100_get_regs,
2632 .get_wol = e100_get_wol,
2633 .set_wol = e100_set_wol,
2634 .get_msglevel = e100_get_msglevel,
2635 .set_msglevel = e100_set_msglevel,
2636 .nway_reset = e100_nway_reset,
2637 .get_link = e100_get_link,
2638 .get_eeprom_len = e100_get_eeprom_len,
2639 .get_eeprom = e100_get_eeprom,
2640 .set_eeprom = e100_set_eeprom,
2641 .get_ringparam = e100_get_ringparam,
2642 .set_ringparam = e100_set_ringparam,
2643 .self_test = e100_diag_test,
2644 .get_strings = e100_get_strings,
2645 .phys_id = e100_phys_id,
1da177e4 2646 .get_ethtool_stats = e100_get_ethtool_stats,
b9f2c044 2647 .get_sset_count = e100_get_sset_count,
2648};
2649
2650static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2651{
2652 struct nic *nic = netdev_priv(netdev);
2653
2654 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2655}
2656
2657static int e100_alloc(struct nic *nic)
2658{
2659 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2660 &nic->dma_addr);
2661 return nic->mem ? 0 : -ENOMEM;
2662}
2663
2664static void e100_free(struct nic *nic)
2665{
f26251eb 2666 if (nic->mem) {
2667 pci_free_consistent(nic->pdev, sizeof(struct mem),
2668 nic->mem, nic->dma_addr);
2669 nic->mem = NULL;
2670 }
2671}
2672
2673static int e100_open(struct net_device *netdev)
2674{
2675 struct nic *nic = netdev_priv(netdev);
2676 int err = 0;
2677
2678 netif_carrier_off(netdev);
f26251eb 2679 if ((err = e100_up(nic)))
2680 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2681 return err;
2682}
2683
2684static int e100_close(struct net_device *netdev)
2685{
2686 e100_down(netdev_priv(netdev));
2687 return 0;
2688}
2689
2690static const struct net_device_ops e100_netdev_ops = {
2691 .ndo_open = e100_open,
2692 .ndo_stop = e100_close,
00829823 2693 .ndo_start_xmit = e100_xmit_frame,
2694 .ndo_validate_addr = eth_validate_addr,
2695 .ndo_set_multicast_list = e100_set_multicast_list,
2696 .ndo_set_mac_address = e100_set_mac_address,
2697 .ndo_change_mtu = e100_change_mtu,
2698 .ndo_do_ioctl = e100_do_ioctl,
2699 .ndo_tx_timeout = e100_tx_timeout,
2700#ifdef CONFIG_NET_POLL_CONTROLLER
2701 .ndo_poll_controller = e100_netpoll,
2702#endif
2703};
2704
2705static int __devinit e100_probe(struct pci_dev *pdev,
2706 const struct pci_device_id *ent)
2707{
2708 struct net_device *netdev;
2709 struct nic *nic;
2710 int err;
2711
2712 if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2713 if (((1 << debug) - 1) & NETIF_MSG_PROBE)
2714 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
2715 return -ENOMEM;
2716 }
2717
acc78426 2718 netdev->netdev_ops = &e100_netdev_ops;
1da177e4 2719 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
1da177e4 2720 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
0eb5a34c 2721 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2722
2723 nic = netdev_priv(netdev);
bea3348e 2724 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
2725 nic->netdev = netdev;
2726 nic->pdev = pdev;
2727 nic->msg_enable = (1 << debug) - 1;
72001762 2728 nic->mdio_ctrl = mdio_ctrl_hw;
2729 pci_set_drvdata(pdev, netdev);
2730
f26251eb 2731 if ((err = pci_enable_device(pdev))) {
2732 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
2733 goto err_out_free_dev;
2734 }
2735
f26251eb 2736 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2737 DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
2738 "base address, aborting.\n");
2739 err = -ENODEV;
2740 goto err_out_disable_pdev;
2741 }
2742
f26251eb 2743 if ((err = pci_request_regions(pdev, DRV_NAME))) {
2744 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
2745 goto err_out_disable_pdev;
2746 }
2747
284901a9 2748 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
2749 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
2750 goto err_out_free_res;
2751 }
2752
2753 SET_NETDEV_DEV(netdev, &pdev->dev);
2754
2755 if (use_io)
2756 DPRINTK(PROBE, INFO, "using i/o access mode\n");
2757
2758 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
f26251eb 2759 if (!nic->csr) {
2760 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
2761 err = -ENOMEM;
2762 goto err_out_free_res;
2763 }
2764
f26251eb 2765 if (ent->driver_data)
2766 nic->flags |= ich;
2767 else
2768 nic->flags &= ~ich;
2769
2770 e100_get_defaults(nic);
2771
1f53367d 2772 /* locks must be initialized before calling hw_reset */
2773 spin_lock_init(&nic->cb_lock);
2774 spin_lock_init(&nic->cmd_lock);
ac7c6669 2775 spin_lock_init(&nic->mdio_lock);
2776
2777 /* Reset the device before pci_set_master() in case device is in some
2778 * funky state and has an interrupt pending - hint: we don't have the
2779 * interrupt handler registered yet. */
2780 e100_hw_reset(nic);
2781
2782 pci_set_master(pdev);
2783
2784 init_timer(&nic->watchdog);
2785 nic->watchdog.function = e100_watchdog;
2786 nic->watchdog.data = (unsigned long)nic;
2787 init_timer(&nic->blink_timer);
2788 nic->blink_timer.function = e100_blink_led;
2789 nic->blink_timer.data = (unsigned long)nic;
2790
c4028958 2791 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2acdb1e0 2792
f26251eb 2793 if ((err = e100_alloc(nic))) {
2794 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2795 goto err_out_iounmap;
2796 }
2797
f26251eb 2798 if ((err = e100_eeprom_load(nic)))
2799 goto err_out_free;
2800
2801 e100_phy_init(nic);
2802
1da177e4 2803 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
a92dd923 2804 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
2805 if (!is_valid_ether_addr(netdev->perm_addr)) {
2806 if (!eeprom_bad_csum_allow) {
2807 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2808 "EEPROM, aborting.\n");
2809 err = -EAGAIN;
2810 goto err_out_free;
2811 } else {
2812 DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
2813 "you MUST configure one.\n");
2814 }
2815 }
2816
2817 /* Wol magic packet can be enabled from eeprom */
f26251eb 2818 if ((nic->mac >= mac_82558_D101_A4) &&
bc79fc84 2819 (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
1da177e4 2820 nic->flags |= wol_magic;
2821 device_set_wakeup_enable(&pdev->dev, true);
2822 }
1da177e4 2823
6bdacb1a 2824 /* ack any pending wake events, disable PME */
e7272403 2825 pci_pme_active(pdev, false);
2826
2827 strcpy(netdev->name, "eth%d");
f26251eb 2828 if ((err = register_netdev(netdev))) {
2829 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2830 goto err_out_free;
2831 }
2832
e174961c 2833 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
0795af57 2834 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
e174961c 2835 pdev->irq, netdev->dev_addr);
2836
2837 return 0;
2838
2839err_out_free:
2840 e100_free(nic);
2841err_out_iounmap:
27345bb6 2842 pci_iounmap(pdev, nic->csr);
2843err_out_free_res:
2844 pci_release_regions(pdev);
2845err_out_disable_pdev:
2846 pci_disable_device(pdev);
2847err_out_free_dev:
2848 pci_set_drvdata(pdev, NULL);
2849 free_netdev(netdev);
2850 return err;
2851}
2852
2853static void __devexit e100_remove(struct pci_dev *pdev)
2854{
2855 struct net_device *netdev = pci_get_drvdata(pdev);
2856
f26251eb 2857 if (netdev) {
2858 struct nic *nic = netdev_priv(netdev);
2859 unregister_netdev(netdev);
2860 e100_free(nic);
915e91d7 2861 pci_iounmap(pdev, nic->csr);
2862 free_netdev(netdev);
2863 pci_release_regions(pdev);
2864 pci_disable_device(pdev);
2865 pci_set_drvdata(pdev, NULL);
2866 }
2867}
2868
2869#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
2870#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
2871#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
ac7c992c 2872static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
2873{
2874 struct net_device *netdev = pci_get_drvdata(pdev);
2875 struct nic *nic = netdev_priv(netdev);
2876
824545e7 2877 if (netif_running(netdev))
f902283b 2878 e100_down(nic);
518d8338 2879 netif_device_detach(netdev);
a53a33da 2880
1da177e4 2881 pci_save_state(pdev);
2882
2883 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2884 /* enable reverse auto-negotiation */
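 /* (On the 82552 this presumably lets the PHY renegotiate the link on
 * its own while the host sleeps, keeping a wake-capable link up at
 * lower power -- inferred from the SMARTSPEED/REV_ANEG names.) */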
2885 if (nic->phy == phy_82552_v) {
2886 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
2887 E100_82552_SMARTSPEED);
2888
2889 mdio_write(netdev, nic->mii.phy_id,
2890 E100_82552_SMARTSPEED, smartspeed |
2891 E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
2892 }
ac7c992c 2893 *enable_wake = true;
e8e82b76 2894 } else {
ac7c992c 2895 *enable_wake = false;
e8e82b76 2896 }
975b366a 2897
8543da66 2898 pci_disable_device(pdev);
ac7c992c 2899}
1da177e4 2900
2901static int __e100_power_off(struct pci_dev *pdev, bool wake)
2902{
6905b1f1 2903 if (wake)
ac7c992c 2904 return pci_prepare_to_sleep(pdev);
2905
2906 pci_wake_from_d3(pdev, false);
2907 pci_set_power_state(pdev, PCI_D3hot);
2908
2909 return 0;
2910}
2911
f902283b 2912#ifdef CONFIG_PM
2913static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2914{
2915 bool wake;
2916 __e100_shutdown(pdev, &wake);
2917 return __e100_power_off(pdev, wake);
2918}
2919
2920static int e100_resume(struct pci_dev *pdev)
2921{
2922 struct net_device *netdev = pci_get_drvdata(pdev);
2923 struct nic *nic = netdev_priv(netdev);
2924
975b366a 2925 pci_set_power_state(pdev, PCI_D0);
1da177e4 2926 pci_restore_state(pdev);
6bdacb1a 2927 /* ack any pending wake events, disable PME */
975b366a 2928 pci_enable_wake(pdev, 0, 0);
1da177e4 2929
4b512d26 2930 /* disable reverse auto-negotiation */
2931 if (nic->phy == phy_82552_v) {
2932 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
2933 E100_82552_SMARTSPEED);
2934
2935 mdio_write(netdev, nic->mii.phy_id,
2936 E100_82552_SMARTSPEED,
2937 smartspeed & ~(E100_82552_REV_ANEG));
2938 }
2939
1da177e4 2940 netif_device_attach(netdev);
975b366a 2941 if (netif_running(netdev))
2942 e100_up(nic);
2943
2944 return 0;
2945}
975b366a 2946#endif /* CONFIG_PM */
1da177e4 2947
d18c3db5 2948static void e100_shutdown(struct pci_dev *pdev)
6bdacb1a 2949{
2950 bool wake;
2951 __e100_shutdown(pdev, &wake);
2952 if (system_state == SYSTEM_POWER_OFF)
2953 __e100_power_off(pdev, wake);
2954}
2955
2956/* ------------------ PCI Error Recovery infrastructure -------------- */
2957/**
2958 * e100_io_error_detected - called when PCI error is detected.
2959 * @pdev: Pointer to PCI device
0a0863af 2960 * @state: The current pci connection state
2961 */
2962static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2963{
2964 struct net_device *netdev = pci_get_drvdata(pdev);
bea3348e 2965 struct nic *nic = netdev_priv(netdev);
2cc30492 2966
2cc30492 2967 netif_device_detach(netdev);
2968
2969 if (state == pci_channel_io_perm_failure)
2970 return PCI_ERS_RESULT_DISCONNECT;
2971
2972 if (netif_running(netdev))
2973 e100_down(nic);
b1d26f24 2974 pci_disable_device(pdev);
2975
2976 /* Request a slot reset. */
2977 return PCI_ERS_RESULT_NEED_RESET;
2978}
2979
2980/**
2981 * e100_io_slot_reset - called after the pci bus has been reset.
2982 * @pdev: Pointer to PCI device
2983 *
2984 * Restart the card from scratch.
2985 */
2986static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
2987{
2988 struct net_device *netdev = pci_get_drvdata(pdev);
2989 struct nic *nic = netdev_priv(netdev);
2990
2991 if (pci_enable_device(pdev)) {
2992 printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
2993 return PCI_ERS_RESULT_DISCONNECT;
2994 }
2995 pci_set_master(pdev);
2996
2997 /* Only one device per card can do a reset */
2998 if (0 != PCI_FUNC(pdev->devfn))
2999 return PCI_ERS_RESULT_RECOVERED;
3000 e100_hw_reset(nic);
3001 e100_phy_init(nic);
3002
3003 return PCI_ERS_RESULT_RECOVERED;
3004}
3005
3006/**
3007 * e100_io_resume - resume normal operations
3008 * @pdev: Pointer to PCI device
3009 *
3010 * Resume normal operations after an error recovery
3011 * sequence has been completed.
3012 */
3013static void e100_io_resume(struct pci_dev *pdev)
3014{
3015 struct net_device *netdev = pci_get_drvdata(pdev);
3016 struct nic *nic = netdev_priv(netdev);
3017
3018 /* ack any pending wake events, disable PME */
3019 pci_enable_wake(pdev, 0, 0);
3020
3021 netif_device_attach(netdev);
3022 if (netif_running(netdev)) {
3023 e100_open(netdev);
3024 mod_timer(&nic->watchdog, jiffies);
3025 }
3026}
3027
3028static struct pci_error_handlers e100_err_handler = {
3029 .error_detected = e100_io_error_detected,
3030 .slot_reset = e100_io_slot_reset,
3031 .resume = e100_io_resume,
3032};
6bdacb1a 3033
3034static struct pci_driver e100_driver = {
3035 .name = DRV_NAME,
3036 .id_table = e100_id_table,
3037 .probe = e100_probe,
3038 .remove = __devexit_p(e100_remove),
e8e82b76 3039#ifdef CONFIG_PM
975b366a 3040 /* Power Management hooks */
3041 .suspend = e100_suspend,
3042 .resume = e100_resume,
3043#endif
05479938 3044 .shutdown = e100_shutdown,
2cc30492 3045 .err_handler = &e100_err_handler,
3046};
3047
3048static int __init e100_init_module(void)
3049{
f26251eb 3050 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
3051 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3052 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
3053 }
29917620 3054 return pci_register_driver(&e100_driver);
3055}
3056
3057static void __exit e100_cleanup_module(void)
3058{
3059 pci_unregister_driver(&e100_driver);
3060}
3061
3062module_init(e100_init_module);
3063module_exit(e100_cleanup_module);