/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
28
29/*
30 * e100.c: Intel(R) PRO/100 ethernet driver
31 *
32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
35 *
36 * References:
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
40 *
41 *
42 * Theory of Operation
43 *
44 * I. General
45 *
46 * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at 33Mhz PCI clock rate.
54 *
55 * II. Driver Operation
56 *
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
63 *
64 * 8255x is highly MII-compliant and all access to the PHY go
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
67 * devices.
68 *
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
72 *
73 * III. Transmit
74 *
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
79 * controller, and the controller can be restarted by issue a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
82 *
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
86 *
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB clean
90 * up happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of number of free CB resources available.
92 *
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
95 * with 00h.
96 *
0a0863af 97 * IV. Receive
1da177e4
LT
98 *
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
108 *
7734f6e6
DA
109 * In order to keep updates to the RFD link field from colliding with
110 * hardware writes to mark packets complete, we use the feature that
111 * hardware will not write to a size 0 descriptor and mark the previous
112 * packet as end-of-list (EL). After updating the link, we remove EL
113 * and only then restore the size such that hardware may use the
114 * previous-to-end RFD.
115 *
1da177e4
LT
116 * Under typical operation, the receive unit (RU) is start once,
117 * and the controller happily fills RFDs as frames arrive. If
118 * replacement RFDs cannot be allocated, or the RU goes non-active,
119 * the RU must be restarted. Frame arrival generates an interrupt,
120 * and Rx indication and re-allocation happen in the same context,
121 * therefore no locking is required. A software-generated interrupt
122 * is generated from the watchdog to recover from a failed allocation
0a0863af 123 * scenario where all Rx resources have been indicated and none re-
1da177e4
LT
124 * placed.
125 *
126 * V. Miscellaneous
127 *
128 * VLAN offloading of tagging, stripping and filtering is not
129 * supported, but driver will accommodate the extra 4-byte VLAN tag
130 * for processing by upper layers. Tx/Rx Checksum offloading is not
131 * supported. Tx Scatter/Gather is not supported. Jumbo Frames is
132 * not supported (hardware limitation).
133 *
134 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
135 *
136 * Thanks to JC (jchapman@katalix.com) for helping with
137 * testing/troubleshooting the development driver.
138 *
139 * TODO:
140 * o several entry points race with dev->close
141 * o check for tx-no-resources/stop Q races with tx clean/wake Q
ac7c6669
OM
142 *
143 * FIXES:
144 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
145 * - Stratus87247: protect MDI control register manipulations
1da177e4
LT
146 */
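/*
 * The Tx handoff in section III above is easy to get wrong, so a tiny
 * standalone model may help.  This sketch is illustrative only; it is
 * compiled out and not part of the driver, and the "toy_" names are
 * hypothetical.  Queuing marks the new tail CB with the suspend (S)
 * bit before clearing S in the previous CB, so the controller can
 * never run past a command that is still being prepared.
 */
#if 0
struct toy_cb {
	unsigned short command;		/* TOY_CB_S models the S bit */
	struct toy_cb *prev, *next;
};
#define TOY_CB_S 0x4000

static void toy_queue_cmd(struct toy_cb *new_tail)
{
	new_tail->command |= TOY_CB_S;	/* suspend on the new tail first */
	/* the real driver issues wmb() here so h/w observes this order */
	new_tail->prev->command &= ~TOY_CB_S;	/* then let h/w advance */
}
#endif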
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <asm/unaligned.h>


#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
#define PFX			DRV_NAME ": "

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");

#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__func__ , ## args))
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};
/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_no_res = 0x08,
	rus_ready  = 0x10,
	rus_mask   = 0x3C,
};

enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING   = 1,
	RU_UNINITIALIZED = -1,
};
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};
struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
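/* For example, on a little-endian-bitfield host
 * X(rx_fifo_limit:4, tx_fifo_limit:3) declares rx_fifo_limit in the
 * low-order bits; with __BIG_ENDIAN_BITFIELD the two arguments are
 * swapped, so each config byte below keeps the layout the hardware
 * expects regardless of the host's bitfield order. */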
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};
#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};
/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};
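/* Note: status, command, link, and the u union mirror the CB layout the
 * controller reads and writes via DMA; the trailing next, prev,
 * dma_addr, and skb fields are driver-private bookkeeping that hardware
 * never sees. */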
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;
	u16 eeprom_wc;
	__le16 eeprom[256];
	spinlock_t mdio_lock;
};
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for (j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for (i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
}
/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for (i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if (!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
}
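/* Worked example of the command framing above: with addr_len == 8 and
 * addr == 0x0A (the eeprom_id word), cmd_addr_data is
 * ((op_read << 8) | 0x0A) << 16 == 0x060A0000, so the start bit,
 * opcode, and address are clocked out MSB-first from bit 31. */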
/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
#define E100_WAIT_SCB_TIMEOUT	20000	/* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST	20	/* delay like the old code */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
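/* Note: the wait loop above implements the SCB handshake: a new command
 * may only be written once the device acknowledges the previous one by
 * clearing scb.cmd_lo.  A CU resume carries no pointer argument, which
 * is why gen_ptr is skipped for cuc_resume. */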
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
901
902static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
903{
904 u32 data_out = 0;
905 unsigned int i;
ac7c6669 906 unsigned long flags;
1da177e4 907
ac7c6669
OM
908
909 /*
910 * Stratus87247: we shouldn't be writing the MDI control
911 * register until the Ready bit shows True. Also, since
912 * manipulation of the MDI control registers is a multi-step
913 * procedure it should be done under lock.
914 */
915 spin_lock_irqsave(&nic->mdio_lock, flags);
916 for (i = 100; i; --i) {
27345bb6 917 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
ac7c6669
OM
918 break;
919 udelay(20);
920 }
921 if (unlikely(!i)) {
922 printk("e100.mdio_ctrl(%s) won't go Ready\n",
923 nic->netdev->name );
924 spin_unlock_irqrestore(&nic->mdio_lock, flags);
925 return 0; /* No way to indicate timeout error */
926 }
27345bb6 927 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
1da177e4 928
ac7c6669 929 for (i = 0; i < 100; i++) {
1da177e4 930 udelay(20);
27345bb6 931 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
1da177e4
LT
932 break;
933 }
ac7c6669 934 spin_unlock_irqrestore(&nic->mdio_lock, flags);
1da177e4
LT
935 DPRINTK(HW, DEBUG,
936 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
937 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
938 return (u16)data_out;
939}
940
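/* MDI control word layout, as used by the iowrite32() above: bits 16-20
 * select the register, bits 21-25 the PHY address, bits 26-27 the
 * opcode (mdi_read/mdi_write), bit 28 is the ready flag, and the low 16
 * bits carry data.  E.g. reading BMSR (reg 1) on PHY 1 writes
 * (1 << 16) | (1 << 21) | mdi_read. */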
static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	struct nic *nic = netdev_priv(netdev);

	if ((nic->phy == phy_82552_v) && (reg == MII_BMCR) &&
	    (data & (BMCR_ANRESTART | BMCR_ANENABLE))) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/*
		 * Workaround Si issue where sometimes the part will not
		 * autoneg to 100Mbps even when advertised.
		 */
		if (advert & ADVERTISE_100FULL)
			data |= BMCR_SPEED100 | BMCR_FULLDPLX;
		else if (advert & ADVERTISE_100HALF)
			data |= BMCR_SPEED100;
	}

	mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
}
static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on the number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to 0xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is 0x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value 0xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */
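/* Note on the defaults above: BUNDLESMALL != 0 makes e100_setup_ucode()
 * below patch in the 0xFFFF mask, i.e. small frames are bundled like
 * any others; 0 patches in 0xFF80 so frames under 128 bytes interrupt
 * immediately.  BUNDLEMAX and INTDELAY are written verbatim into the
 * ucode's "move immediate" literals. */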
/* Initialize firmware */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw;
	u8 timer, bundle, min_size;
	int err;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision */
	if (nic->mac == mac_82559_D101M)
		fw_name = FIRMWARE_D101M;
	else if (nic->mac == mac_82559_D101S)
		fw_name = FIRMWARE_D101S;
	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
		fw_name = FIRMWARE_D102E;
	else /* No ucode on other devices */
		return NULL;

	err = request_firmware(&fw, fw_name, &nic->pdev->dev);
	if (err) {
		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
			fw_name, err);
		return ERR_PTR(err);
	}
	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
			fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		DPRINTK(PROBE, ERR,
			"\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}
	/* OK, firmware is validated and ready to use... */
	return fw;
}
JSR
1191static void e100_setup_ucode(struct nic *nic, struct cb *cb,
1192 struct sk_buff *skb)
24180333 1193{
9ac32e1b
JSR
1194 const struct firmware *fw = (void *)skb;
1195 u8 timer, bundle, min_size;
1196
1197 /* It's not a real skb; we just abused the fact that e100_exec_cb
1198 will pass it through to here... */
1199 cb->skb = NULL;
1200
1201 /* firmware is stored as little endian already */
1202 memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
1203
1204 /* Read timer, bundle and min_size from end of firmware blob */
1205 timer = fw->data[UCODE_SIZE * 4];
1206 bundle = fw->data[UCODE_SIZE * 4 + 1];
1207 min_size = fw->data[UCODE_SIZE * 4 + 2];
1208
1209 /* Insert user-tunable settings in cb->u.ucode */
1210 cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
1211 cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
1212 cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
1213 cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
1214 cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
1215 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
1216
1217 cb->command = cpu_to_le16(cb_ucode | cb_el);
1218}
1219
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		DPRINTK(PROBE, ERR, "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		DPRINTK(PROBE, ERR, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}
#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if (addr == 32)
		return -EAGAIN;

	/* Isolate all the PHY ids */
	for (addr = 0; addr < 32; addr++)
		mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
	/* Select the discovered PHY */
	bmcr &= ~BMCR_ISOLATE;
	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	DPRINTK(HW, ERR, "e100_hw_init\n");
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct dev_mc_list *list = netdev->mc_list;
	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	for (i = 0; list && i < count; i++, list = list->next)
		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
			ETH_ALEN);
}

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
		netdev->mc_count, netdev->flags);

	if (netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if (netdev->flags & IFF_ALLMULTI ||
		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
	/* Adjust inter-frame-spacing (IFS) between two transmits if
	 * we're getting collisions on a half-duplex connection. */

	if (duplex == DUPLEX_HALF) {
		u32 prev = nic->adaptive_ifs;
		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
		    (nic->tx_frames > min_frames)) {
			if (nic->adaptive_ifs < 60)
				nic->adaptive_ifs += 5;
		} else if (nic->tx_frames < min_frames) {
			if (nic->adaptive_ifs >= 5)
				nic->adaptive_ifs -= 5;
		}
		if (nic->adaptive_ifs != prev)
			e100_exec_cb(nic, NULL, e100_configure);
	}
}
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
			nic->netdev->name,
			cmd.speed == SPEED_100 ? "100" : "10",
			cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		printk(KERN_INFO "e100: %s NIC Link is Down\n",
			nic->netdev->name);
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,
		 &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && cmd.speed == SPEED_10 &&
	    cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
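/* Note: (nic->cbs_avail & ~15) == nic->cbs_avail holds exactly when
 * cbs_avail is a multiple of 16, so about one in every 16 queued frames
 * requests a completion interrupt even when the cb_cid delay is used. */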
static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if (nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if (e100_exec_cmd(nic, cuc_nop, 0))
			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch (err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return 1;
	}

	netdev->trans_start = jiffies;
	return 0;
}
858119e1 1591static int e100_tx_clean(struct nic *nic)
1da177e4 1592{
09f75cd7 1593 struct net_device *dev = nic->netdev;
1594 struct cb *cb;
1595 int tx_cleaned = 0;
1596
1597 spin_lock(&nic->cb_lock);
1598
1da177e4 1599 /* Clean CBs marked complete */
f26251eb 1600 for (cb = nic->cb_to_clean;
1601 cb->status & cpu_to_le16(cb_complete);
1602 cb = nic->cb_to_clean = cb->next) {
1603 DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
1604 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
1605 cb->status);
1606
f26251eb 1607 if (likely(cb->skb != NULL)) {
1608 dev->stats.tx_packets++;
1609 dev->stats.tx_bytes += cb->skb->len;
1610
1611 pci_unmap_single(nic->pdev,
1612 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1613 le16_to_cpu(cb->u.tcb.tbd.size),
1614 PCI_DMA_TODEVICE);
1615 dev_kfree_skb_any(cb->skb);
1616 cb->skb = NULL;
1617 tx_cleaned = 1;
1618 }
1619 cb->status = 0;
1620 nic->cbs_avail++;
1621 }
1622
1623 spin_unlock(&nic->cb_lock);
1624
1625 /* Recover from running out of Tx resources in xmit_frame */
f26251eb 1626 if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1627 netif_wake_queue(nic->netdev);
1628
1629 return tx_cleaned;
1630}
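/* Sketch (hypothetical helper) of the stop/wake pairing between
 * e100_xmit_frame() and the wake above: the queue is stopped only
 * when the CB ring fills and woken only after at least one CB has
 * been reclaimed, so the two transitions stay strictly paired. */
static void tx_flow_example(struct nic *nic, int xmit_err, int tx_cleaned)
{
	if (xmit_err == -ENOSPC)		/* Tx path: ring full */
		netif_stop_queue(nic->netdev);
	if (tx_cleaned && netif_queue_stopped(nic->netdev))
		netif_wake_queue(nic->netdev);	/* clean path: room again */
}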
1631
1632static void e100_clean_cbs(struct nic *nic)
1633{
1634 if (nic->cbs) {
1635 while (nic->cbs_avail != nic->params.cbs.count) {
1da177e4 1636 struct cb *cb = nic->cb_to_clean;
f26251eb 1637 if (cb->skb) {
1638 pci_unmap_single(nic->pdev,
1639 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1640 le16_to_cpu(cb->u.tcb.tbd.size),
1641 PCI_DMA_TODEVICE);
1642 dev_kfree_skb(cb->skb);
1643 }
1644 nic->cb_to_clean = nic->cb_to_clean->next;
1645 nic->cbs_avail++;
1646 }
1647 pci_free_consistent(nic->pdev,
1648 sizeof(struct cb) * nic->params.cbs.count,
1649 nic->cbs, nic->cbs_dma_addr);
1650 nic->cbs = NULL;
1651 nic->cbs_avail = 0;
1652 }
1653 nic->cuc_cmd = cuc_start;
1654 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1655 nic->cbs;
1656}
1657
1658static int e100_alloc_cbs(struct nic *nic)
1659{
1660 struct cb *cb;
1661 unsigned int i, count = nic->params.cbs.count;
1662
1663 nic->cuc_cmd = cuc_start;
1664 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1665 nic->cbs_avail = 0;
1666
1667 nic->cbs = pci_alloc_consistent(nic->pdev,
1668 sizeof(struct cb) * count, &nic->cbs_dma_addr);
f26251eb 1669 if (!nic->cbs)
1670 return -ENOMEM;
1671
f26251eb 1672 for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
1673 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1674 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1675
1676 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1677 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1678 ((i+1) % count) * sizeof(struct cb));
1679 cb->skb = NULL;
1680 }
1681
1682 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1683 nic->cbs_avail = count;
1684
1685 return 0;
1686}
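/* Sketch of the dual linkage built above (struct name hypothetical):
 * next/prev are CPU-side virtual pointers for the driver, while the
 * 'link' field the device follows must hold a bus address, hence the
 * base-DMA-address-plus-index arithmetic. */
struct dma_ring_elem { __le32 link; };		/* illustrative element */
static void dma_ring_link_example(struct dma_ring_elem *elems,
				  dma_addr_t base, unsigned int count)
{
	unsigned int i;
	for (i = 0; i < count; i++)	/* last entry wraps to the first */
		elems[i].link = cpu_to_le32(base +
			((i + 1) % count) * sizeof(struct dma_ring_elem));
}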
1687
ca93ca42 1688static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1da177e4 1689{
1690 if (!nic->rxs) return;
1691 if (RU_SUSPENDED != nic->ru_running) return;
1692
1693 /* handle init time starts */
f26251eb 1694 if (!rx) rx = nic->rxs;
1695
1696 /* (Re)start RU if suspended or idle and RFA is non-NULL */
f26251eb 1697 if (rx->skb) {
1698 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1699 nic->ru_running = RU_RUNNING;
1700 }
1701}
1702
1703#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
858119e1 1704static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1da177e4 1705{
f26251eb 1706 if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
1707 return -ENOMEM;
1708
1709 /* Align, init, and map the RFD. */
1da177e4 1710 skb_reserve(rx->skb, NET_IP_ALIGN);
27d7ff46 1711 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1712 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1713 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1714
8d8bb39b 1715 if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
1f53367d 1716 dev_kfree_skb_any(rx->skb);
097688ef 1717 rx->skb = NULL;
1718 rx->dma_addr = 0;
1719 return -ENOMEM;
1720 }
1721
1da177e4 1722 /* Link the RFD to end of RFA by linking previous RFD to
1723 * this one. We are safe to touch the previous RFD because
1724 * it is protected by the before last buffer's el bit being set */
aaf918ba 1725 if (rx->prev->skb) {
1da177e4 1726 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
6caf52a4 1727 put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1923815d 1728 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
773c9c1f 1729 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1730 }
1731
1732 return 0;
1733}
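/* Why the NET_IP_ALIGN reserve above: with the RFD (16 bytes here)
 * and the 14-byte Ethernet header in front of the payload, shifting
 * the buffer by NET_IP_ALIGN (typically 2) bytes lands the IP header
 * on a 4-byte boundary. The allocation shape in isolation
 * (hypothetical helper): */
static struct sk_buff *rx_skb_alloc_example(struct net_device *netdev)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(netdev, RFD_BUF_LEN + NET_IP_ALIGN);
	if (!skb)
		return NULL;
	skb_reserve(skb, NET_IP_ALIGN);	/* shift so the IP header aligns */
	return skb;
}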
1734
858119e1 1735static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1736 unsigned int *work_done, unsigned int work_to_do)
1737{
09f75cd7 1738 struct net_device *dev = nic->netdev;
1739 struct sk_buff *skb = rx->skb;
1740 struct rfd *rfd = (struct rfd *)skb->data;
1741 u16 rfd_status, actual_size;
1742
f26251eb 1743 if (unlikely(work_done && *work_done >= work_to_do))
1744 return -EAGAIN;
1745
1746 /* Need to sync before taking a peek at cb_complete bit */
1747 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
773c9c1f 1748 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1749 rfd_status = le16_to_cpu(rfd->status);
1750
1751 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
1752
1753 /* If data isn't ready, nothing to indicate */
1754 if (unlikely(!(rfd_status & cb_complete))) {
1755 /* If the next buffer has the el bit, but we think the receiver
1756 * is still running, check to see if it really stopped while
1757 * we had interrupts off.
1758 * This allows for a fast restart without re-enabling
1759 * interrupts */
1760 if ((le16_to_cpu(rfd->command) & cb_el) &&
1761 (RU_RUNNING == nic->ru_running))
1762
17393dd6 1763 if (ioread8(&nic->csr->scb.status) & rus_no_res)
7734f6e6 1764 nic->ru_running = RU_SUSPENDED;
1f53367d 1765 return -ENODATA;
7734f6e6 1766 }
1767
1768 /* Get actual data size */
1769 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
f26251eb 1770 if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
1771 actual_size = RFD_BUF_LEN - sizeof(struct rfd);
1772
1773 /* Get data */
1774 pci_unmap_single(nic->pdev, rx->dma_addr,
773c9c1f 1775 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1da177e4 1776
1777 /* If this buffer has the el bit, but we think the receiver
1778 * is still running, check to see if it really stopped while
1779 * we had interrupts off.
1780 * This allows for a fast restart without re-enabling interrupts.
1781 * This can happen when the RU sees the size change but also sees
1782 * the el bit set. */
1783 if ((le16_to_cpu(rfd->command) & cb_el) &&
1784 (RU_RUNNING == nic->ru_running)) {
1785
17393dd6 1786 if (ioread8(&nic->csr->scb.status) & rus_no_res)
ca93ca42 1787 nic->ru_running = RU_SUSPENDED;
7734f6e6 1788 }
ca93ca42 1789
1790 /* Pull off the RFD and put the actual data (minus eth hdr) */
1791 skb_reserve(skb, sizeof(struct rfd));
1792 skb_put(skb, actual_size);
1793 skb->protocol = eth_type_trans(skb, nic->netdev);
1794
f26251eb 1795 if (unlikely(!(rfd_status & cb_ok))) {
1da177e4 1796 /* Don't indicate if hardware indicates errors */
1da177e4 1797 dev_kfree_skb_any(skb);
f26251eb 1798 } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
1799 /* Don't indicate oversized frames */
1800 nic->rx_over_length_errors++;
1801 dev_kfree_skb_any(skb);
1802 } else {
1803 dev->stats.rx_packets++;
1804 dev->stats.rx_bytes += actual_size;
1da177e4 1805 netif_receive_skb(skb);
f26251eb 1806 if (work_done)
1807 (*work_done)++;
1808 }
1809
1810 rx->skb = NULL;
1811
1812 return 0;
1813}
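/* Sketch of the size handling above: the RFD actual-count field is a
 * 14-bit count (hence the & 0x3FFF mask), and the result is clamped
 * to the data area so a confused device cannot cause an overrun. */
static u16 rfd_actual_size_example(const struct rfd *rfd)	/* illustrative */
{
	u16 size = le16_to_cpu(rfd->actual_size) & 0x3FFF; /* 14-bit count */

	if (size > RFD_BUF_LEN - sizeof(struct rfd))
		size = RFD_BUF_LEN - sizeof(struct rfd);
	return size;
}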
1814
858119e1 1815static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1816 unsigned int work_to_do)
1817{
1818 struct rx *rx;
1819 int restart_required = 0, err = 0;
1820 struct rx *old_before_last_rx, *new_before_last_rx;
1821 struct rfd *old_before_last_rfd, *new_before_last_rfd;
1822
1823 /* Indicate newly arrived packets */
f26251eb 1824 for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
1825 err = e100_rx_indicate(nic, rx, work_done, work_to_do);
1826 /* Hit quota or no more to clean */
1827 if (-EAGAIN == err || -ENODATA == err)
ca93ca42 1828 break;
1829 }
1830
1831
1832 /* On EAGAIN we hit the quota, so there is more work to do; restart
1833 * once cleanup is complete.
1834 * Otherwise, if the RU is already in No Resources (RNR), a restart
1835 * is required now: this ensures the state machine never starts the
1836 * RU with a partially cleaned list, avoiding a race between
1837 * hardware and rx_to_clean when in NAPI mode */
1838 if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
1839 restart_required = 1;
1840
1841 old_before_last_rx = nic->rx_to_use->prev->prev;
1842 old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
ca93ca42 1843
1da177e4 1844 /* Alloc new skbs to refill list */
1845 for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
1846 if (unlikely(e100_rx_alloc_skb(nic, rx)))
1847 break; /* Better luck next time (see watchdog) */
1848 }
ca93ca42 1849
1850 new_before_last_rx = nic->rx_to_use->prev->prev;
1851 if (new_before_last_rx != old_before_last_rx) {
1852 /* Set the el-bit on the buffer that is before the last buffer.
1853 * This lets us update the next pointer on the last buffer
1854 * without worrying about hardware touching it.
1855 * We set the size to 0 to prevent hardware from touching this
1856 * buffer.
1857 * When the hardware hits the before last buffer with el-bit
1858 * and size of 0, it will RNR interrupt, the RUS will go into
1859 * the No Resources state. It will not complete nor write to
1860 * this buffer. */
1861 new_before_last_rfd =
1862 (struct rfd *)new_before_last_rx->skb->data;
1863 new_before_last_rfd->size = 0;
1864 new_before_last_rfd->command |= cpu_to_le16(cb_el);
1865 pci_dma_sync_single_for_device(nic->pdev,
1866 new_before_last_rx->dma_addr, sizeof(struct rfd),
773c9c1f 1867 PCI_DMA_BIDIRECTIONAL);
1868
1869 /* Now that we have a new stopping point, we can clear the old
1870 * stopping point. We must sync twice to get the proper
1871 * ordering on the hardware side of things. */
1872 old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
1873 pci_dma_sync_single_for_device(nic->pdev,
1874 old_before_last_rx->dma_addr, sizeof(struct rfd),
773c9c1f 1875 PCI_DMA_BIDIRECTIONAL);
1876 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
1877 pci_dma_sync_single_for_device(nic->pdev,
1878 old_before_last_rx->dma_addr, sizeof(struct rfd),
773c9c1f 1879 PCI_DMA_BIDIRECTIONAL);
1880 }
1881
f26251eb 1882 if (restart_required) {
ca93ca42 1883 /* ack the RNR interrupt so the RU restart below takes effect */
915e91d7 1884 iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
7734f6e6 1885 e100_start_receiver(nic, nic->rx_to_clean);
f26251eb 1886 if (work_done)
1887 (*work_done)++;
1888 }
1889}
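/* The el-bit hand-off above, isolated (hypothetical helper; the two
 * arguments are the before-last RFDs after and before the refill).
 * Order matters: publish the new stopping point to the device first,
 * then release the old one, syncing after each step. */
static void move_el_mark_example(struct nic *nic, struct rx *old_rx,
				 struct rx *new_rx)
{
	struct rfd *new_rfd = (struct rfd *)new_rx->skb->data;
	struct rfd *old_rfd = (struct rfd *)old_rx->skb->data;

	new_rfd->size = 0;			/* device must not fill it */
	new_rfd->command |= cpu_to_le16(cb_el);
	pci_dma_sync_single_for_device(nic->pdev, new_rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	old_rfd->command &= ~cpu_to_le16(cb_el);
	pci_dma_sync_single_for_device(nic->pdev, old_rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	old_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
	pci_dma_sync_single_for_device(nic->pdev, old_rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
}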
1890
1891static void e100_rx_clean_list(struct nic *nic)
1892{
1893 struct rx *rx;
1894 unsigned int i, count = nic->params.rfds.count;
1895
1896 nic->ru_running = RU_UNINITIALIZED;
1897
1898 if (nic->rxs) {
1899 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
1900 if (rx->skb) {
1da177e4 1901 pci_unmap_single(nic->pdev, rx->dma_addr,
773c9c1f 1902 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1903 dev_kfree_skb(rx->skb);
1904 }
1905 }
1906 kfree(nic->rxs);
1907 nic->rxs = NULL;
1908 }
1909
1910 nic->rx_to_use = nic->rx_to_clean = NULL;
1911}
1912
1913static int e100_rx_alloc_list(struct nic *nic)
1914{
1915 struct rx *rx;
1916 unsigned int i, count = nic->params.rfds.count;
7734f6e6 1917 struct rfd *before_last;
1918
1919 nic->rx_to_use = nic->rx_to_clean = NULL;
ca93ca42 1920 nic->ru_running = RU_UNINITIALIZED;
1da177e4 1921
f26251eb 1922 if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
1da177e4 1923 return -ENOMEM;
1da177e4 1924
f26251eb 1925 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
1926 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
1927 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
f26251eb 1928 if (e100_rx_alloc_skb(nic, rx)) {
1929 e100_rx_clean_list(nic);
1930 return -ENOMEM;
1931 }
1932 }
1933 /* Set the el-bit on the buffer that is before the last buffer.
1934 * This lets us update the next pointer on the last buffer without
1935 * worrying about hardware touching it.
1936 * We set the size to 0 to prevent hardware from touching this buffer.
1937 * When the hardware hits the before last buffer with el-bit and size
1938 * of 0, it will RNR interrupt, the RU will go into the No Resources
1939 * state. It will not complete nor write to this buffer. */
1940 rx = nic->rxs->prev->prev;
1941 before_last = (struct rfd *)rx->skb->data;
1942 before_last->command |= cpu_to_le16(cb_el);
1943 before_last->size = 0;
1944 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
773c9c1f 1945 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1946
1947 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
ca93ca42 1948 nic->ru_running = RU_SUSPENDED;
1949
1950 return 0;
1951}
1952
7d12e780 1953static irqreturn_t e100_intr(int irq, void *dev_id)
1954{
1955 struct net_device *netdev = dev_id;
1956 struct nic *nic = netdev_priv(netdev);
27345bb6 1957 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
1958
1959 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
1960
f26251eb 1961 if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
1962 stat_ack == stat_ack_not_present) /* Hardware is ejected */
1963 return IRQ_NONE;
1964
1965 /* Ack interrupt(s) */
27345bb6 1966 iowrite8(stat_ack, &nic->csr->scb.stat_ack);
1da177e4 1967
ca93ca42 1968 /* We hit Receive No Resource (RNR); restart RU after cleaning */
f26251eb 1969 if (stat_ack & stat_ack_rnr)
1970 nic->ru_running = RU_SUSPENDED;
1971
288379f0 1972 if (likely(napi_schedule_prep(&nic->napi))) {
0685c31b 1973 e100_disable_irq(nic);
288379f0 1974 __napi_schedule(&nic->napi);
0685c31b 1975 }
1976
1977 return IRQ_HANDLED;
1978}
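/* The interrupt-to-poll hand-off above follows the standard NAPI
 * shape: ack the hardware, atomically claim the poll (so a second
 * interrupt cannot double-schedule), disable the device interrupt,
 * and let e100_poll() re-enable it when the budget is not exhausted.
 * Minimal sketch (hypothetical helper): */
static irqreturn_t napi_handoff_example(struct nic *nic)
{
	if (napi_schedule_prep(&nic->napi)) {	/* not already scheduled */
		e100_disable_irq(nic);		/* poll runs with irq masked */
		__napi_schedule(&nic->napi);
	}
	return IRQ_HANDLED;
}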
1979
bea3348e 1980static int e100_poll(struct napi_struct *napi, int budget)
1da177e4 1981{
bea3348e 1982 struct nic *nic = container_of(napi, struct nic, napi);
ddfce6bb 1983 unsigned int work_done = 0;
1da177e4 1984
bea3348e 1985 e100_rx_clean(nic, &work_done, budget);
53e52c72 1986 e100_tx_clean(nic);
1da177e4 1987
1988 /* If budget not fully consumed, exit the polling mode */
1989 if (work_done < budget) {
288379f0 1990 napi_complete(napi);
1da177e4 1991 e100_enable_irq(nic);
1992 }
1993
bea3348e 1994 return work_done;
1995}
1996
1997#ifdef CONFIG_NET_POLL_CONTROLLER
1998static void e100_netpoll(struct net_device *netdev)
1999{
2000 struct nic *nic = netdev_priv(netdev);
611494dc 2001
1da177e4 2002 e100_disable_irq(nic);
7d12e780 2003 e100_intr(nic->pdev->irq, netdev);
2004 e100_tx_clean(nic);
2005 e100_enable_irq(nic);
2006}
2007#endif
2008
2009static int e100_set_mac_address(struct net_device *netdev, void *p)
2010{
2011 struct nic *nic = netdev_priv(netdev);
2012 struct sockaddr *addr = p;
2013
2014 if (!is_valid_ether_addr(addr->sa_data))
2015 return -EADDRNOTAVAIL;
2016
2017 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2018 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2019
2020 return 0;
2021}
2022
2023static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2024{
f26251eb 2025 if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2026 return -EINVAL;
2027 netdev->mtu = new_mtu;
2028 return 0;
2029}
2030
2031static int e100_asf(struct nic *nic)
2032{
2033 /* ASF can be enabled from eeprom */
2034 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2035 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2036 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2037 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
2038}
2039
2040static int e100_up(struct nic *nic)
2041{
2042 int err;
2043
f26251eb 2044 if ((err = e100_rx_alloc_list(nic)))
1da177e4 2045 return err;
f26251eb 2046 if ((err = e100_alloc_cbs(nic)))
1da177e4 2047 goto err_rx_clean_list;
f26251eb 2048 if ((err = e100_hw_init(nic)))
2049 goto err_clean_cbs;
2050 e100_set_multicast_list(nic->netdev);
ca93ca42 2051 e100_start_receiver(nic, NULL);
1da177e4 2052 mod_timer(&nic->watchdog, jiffies);
f26251eb 2053 if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
2054 nic->netdev->name, nic->netdev)))
2055 goto err_no_irq;
1da177e4 2056 netif_wake_queue(nic->netdev);
bea3348e 2057 napi_enable(&nic->napi);
2058 /* enable ints _after_ enabling poll, preventing a race between
2059 * disable ints+schedule */
2060 e100_enable_irq(nic);
2061 return 0;
2062
2063err_no_irq:
2064 del_timer_sync(&nic->watchdog);
2065err_clean_cbs:
2066 e100_clean_cbs(nic);
2067err_rx_clean_list:
2068 e100_rx_clean_list(nic);
2069 return err;
2070}
2071
2072static void e100_down(struct nic *nic)
2073{
0236ebb7 2074 /* wait here for poll to complete */
bea3348e 2075 napi_disable(&nic->napi);
0236ebb7 2076 netif_stop_queue(nic->netdev);
2077 e100_hw_reset(nic);
2078 free_irq(nic->pdev->irq, nic->netdev);
2079 del_timer_sync(&nic->watchdog);
2080 netif_carrier_off(nic->netdev);
2081 e100_clean_cbs(nic);
2082 e100_rx_clean_list(nic);
2083}
2084
2085static void e100_tx_timeout(struct net_device *netdev)
2086{
2087 struct nic *nic = netdev_priv(netdev);
2088
05479938 2089 /* Reset outside of interrupt context, to avoid request_irq
2090 * in interrupt context */
2091 schedule_work(&nic->tx_timeout_task);
2092}
2093
c4028958 2094static void e100_tx_timeout_task(struct work_struct *work)
2acdb1e0 2095{
2096 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2097 struct net_device *netdev = nic->netdev;
2acdb1e0 2098
1da177e4 2099 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
27345bb6 2100 ioread8(&nic->csr->scb.status));
2101 e100_down(netdev_priv(netdev));
2102 e100_up(netdev_priv(netdev));
2103}
2104
2105static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2106{
2107 int err;
2108 struct sk_buff *skb;
2109
2110 /* Use driver resources to perform internal MAC or PHY
2111 * loopback test. A single packet is prepared and transmitted
2112 * in loopback mode, and the test passes if the received
2113 * packet compares byte-for-byte to the transmitted packet. */
2114
f26251eb 2115 if ((err = e100_rx_alloc_list(nic)))
1da177e4 2116 return err;
f26251eb 2117 if ((err = e100_alloc_cbs(nic)))
2118 goto err_clean_rx;
2119
2120 /* ICH PHY loopback is broken so do MAC loopback instead */
f26251eb 2121 if (nic->flags & ich && loopback_mode == lb_phy)
2122 loopback_mode = lb_mac;
2123
2124 nic->loopback = loopback_mode;
f26251eb 2125 if ((err = e100_hw_init(nic)))
2126 goto err_loopback_none;
2127
f26251eb 2128 if (loopback_mode == lb_phy)
2129 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2130 BMCR_LOOPBACK);
2131
ca93ca42 2132 e100_start_receiver(nic, NULL);
1da177e4 2133
f26251eb 2134 if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
2135 err = -ENOMEM;
2136 goto err_loopback_none;
2137 }
2138 skb_put(skb, ETH_DATA_LEN);
2139 memset(skb->data, 0xFF, ETH_DATA_LEN);
2140 e100_xmit_frame(skb, nic->netdev);
2141
2142 msleep(10);
2143
aa49cdd9 2144 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
773c9c1f 2145 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
aa49cdd9 2146
f26251eb 2147 if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2148 skb->data, ETH_DATA_LEN))
2149 err = -EAGAIN;
2150
2151err_loopback_none:
2152 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2153 nic->loopback = lb_none;
1da177e4 2154 e100_clean_cbs(nic);
aa49cdd9 2155 e100_hw_reset(nic);
2156err_clean_rx:
2157 e100_rx_clean_list(nic);
2158 return err;
2159}
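/* The loopback pass/fail decision above in isolation: transmit one
 * all-0xFF ETH_DATA_LEN frame, give the device ~10ms, sync the first
 * Rx buffer back to the CPU, then require a byte-for-byte match just
 * past the RFD header (hypothetical helper over the same fields): */
static int loopback_verify_example(struct nic *nic, struct sk_buff *sent)
{
	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
	return memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
		      sent->data, ETH_DATA_LEN) ? -EAGAIN : 0;
}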
2160
2161#define MII_LED_CONTROL 0x1B
2162#define E100_82552_LED_OVERRIDE 0x19
2163#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
2164#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
2165static void e100_blink_led(unsigned long data)
2166{
2167 struct nic *nic = (struct nic *)data;
2168 enum led_state {
2169 led_on = 0x01,
2170 led_off = 0x04,
2171 led_on_559 = 0x05,
2172 led_on_557 = 0x07,
2173 };
2174 u16 led_reg = MII_LED_CONTROL;
2175
2176 if (nic->phy == phy_82552_v) {
2177 led_reg = E100_82552_LED_OVERRIDE;
1da177e4 2178
2179 nic->leds = (nic->leds == E100_82552_LED_ON) ?
2180 E100_82552_LED_OFF : E100_82552_LED_ON;
2181 } else {
2182 nic->leds = (nic->leds & led_on) ? led_off :
2183 (nic->mac < mac_82559_D101M) ? led_on_557 :
2184 led_on_559;
2185 }
2186 mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
2187 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
2188}
2189
2190static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2191{
2192 struct nic *nic = netdev_priv(netdev);
2193 return mii_ethtool_gset(&nic->mii, cmd);
2194}
2195
2196static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2197{
2198 struct nic *nic = netdev_priv(netdev);
2199 int err;
2200
2201 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2202 err = mii_ethtool_sset(&nic->mii, cmd);
2203 e100_exec_cb(nic, NULL, e100_configure);
2204
2205 return err;
2206}
2207
2208static void e100_get_drvinfo(struct net_device *netdev,
2209 struct ethtool_drvinfo *info)
2210{
2211 struct nic *nic = netdev_priv(netdev);
2212 strcpy(info->driver, DRV_NAME);
2213 strcpy(info->version, DRV_VERSION);
2214 strcpy(info->fw_version, "N/A");
2215 strcpy(info->bus_info, pci_name(nic->pdev));
2216}
2217
abf9b902 2218#define E100_PHY_REGS 0x1C
2219static int e100_get_regs_len(struct net_device *netdev)
2220{
2221 struct nic *nic = netdev_priv(netdev);
abf9b902 2222 return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
2223}
2224
2225static void e100_get_regs(struct net_device *netdev,
2226 struct ethtool_regs *regs, void *p)
2227{
2228 struct nic *nic = netdev_priv(netdev);
2229 u32 *buff = p;
2230 int i;
2231
44c10138 2232 regs->version = (1 << 24) | nic->pdev->revision;
2233 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2234 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2235 ioread16(&nic->csr->scb.status);
f26251eb 2236 for (i = E100_PHY_REGS; i >= 0; i--)
2237 buff[1 + E100_PHY_REGS - i] =
2238 mdio_read(netdev, nic->mii.phy_id, i);
2239 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2240 e100_exec_cb(nic, NULL, e100_dump);
2241 msleep(10);
2242 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2243 sizeof(nic->mem->dump_buf));
2244}
2245
2246static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2247{
2248 struct nic *nic = netdev_priv(netdev);
2249 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2250 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2251}
2252
2253static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2254{
2255 struct nic *nic = netdev_priv(netdev);
2256
2257 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2258 !device_can_wakeup(&nic->pdev->dev))
2259 return -EOPNOTSUPP;
2260
f26251eb 2261 if (wol->wolopts)
2262 nic->flags |= wol_magic;
2263 else
2264 nic->flags &= ~wol_magic;
2265
2266 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2267
2268 e100_exec_cb(nic, NULL, e100_configure);
2269
2270 return 0;
2271}
2272
2273static u32 e100_get_msglevel(struct net_device *netdev)
2274{
2275 struct nic *nic = netdev_priv(netdev);
2276 return nic->msg_enable;
2277}
2278
2279static void e100_set_msglevel(struct net_device *netdev, u32 value)
2280{
2281 struct nic *nic = netdev_priv(netdev);
2282 nic->msg_enable = value;
2283}
2284
2285static int e100_nway_reset(struct net_device *netdev)
2286{
2287 struct nic *nic = netdev_priv(netdev);
2288 return mii_nway_restart(&nic->mii);
2289}
2290
2291static u32 e100_get_link(struct net_device *netdev)
2292{
2293 struct nic *nic = netdev_priv(netdev);
2294 return mii_link_ok(&nic->mii);
2295}
2296
2297static int e100_get_eeprom_len(struct net_device *netdev)
2298{
2299 struct nic *nic = netdev_priv(netdev);
2300 return nic->eeprom_wc << 1;
2301}
2302
2303#define E100_EEPROM_MAGIC 0x1234
2304static int e100_get_eeprom(struct net_device *netdev,
2305 struct ethtool_eeprom *eeprom, u8 *bytes)
2306{
2307 struct nic *nic = netdev_priv(netdev);
2308
2309 eeprom->magic = E100_EEPROM_MAGIC;
2310 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2311
2312 return 0;
2313}
2314
2315static int e100_set_eeprom(struct net_device *netdev,
2316 struct ethtool_eeprom *eeprom, u8 *bytes)
2317{
2318 struct nic *nic = netdev_priv(netdev);
2319
f26251eb 2320 if (eeprom->magic != E100_EEPROM_MAGIC)
2321 return -EINVAL;
2322
2323 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2324
2325 return e100_eeprom_save(nic, eeprom->offset >> 1,
2326 (eeprom->len >> 1) + 1);
2327}
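/* Sketch of the byte-to-word conversion above: the EEPROM is 16-bit
 * word addressed (eeprom_wc counts words, hence the << 1 in
 * e100_get_eeprom_len). A byte span therefore saves starting at word
 * offset >> 1, and the (len >> 1) + 1 word count covers a span that
 * straddles a word boundary (hypothetical helper): */
static void eeprom_word_span_example(u32 byte_off, u32 byte_len,
				     u32 *first_word, u32 *word_count)
{
	*first_word = byte_off >> 1;
	*word_count = (byte_len >> 1) + 1;
}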
2328
2329static void e100_get_ringparam(struct net_device *netdev,
2330 struct ethtool_ringparam *ring)
2331{
2332 struct nic *nic = netdev_priv(netdev);
2333 struct param_range *rfds = &nic->params.rfds;
2334 struct param_range *cbs = &nic->params.cbs;
2335
2336 ring->rx_max_pending = rfds->max;
2337 ring->tx_max_pending = cbs->max;
2338 ring->rx_mini_max_pending = 0;
2339 ring->rx_jumbo_max_pending = 0;
2340 ring->rx_pending = rfds->count;
2341 ring->tx_pending = cbs->count;
2342 ring->rx_mini_pending = 0;
2343 ring->rx_jumbo_pending = 0;
2344}
2345
2346static int e100_set_ringparam(struct net_device *netdev,
2347 struct ethtool_ringparam *ring)
2348{
2349 struct nic *nic = netdev_priv(netdev);
2350 struct param_range *rfds = &nic->params.rfds;
2351 struct param_range *cbs = &nic->params.cbs;
2352
05479938 2353 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2354 return -EINVAL;
2355
f26251eb 2356 if (netif_running(netdev))
2357 e100_down(nic);
2358 rfds->count = max(ring->rx_pending, rfds->min);
2359 rfds->count = min(rfds->count, rfds->max);
2360 cbs->count = max(ring->tx_pending, cbs->min);
2361 cbs->count = min(cbs->count, cbs->max);
2362 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
2363 rfds->count, cbs->count);
f26251eb 2364 if (netif_running(netdev))
2365 e100_up(nic);
2366
2367 return 0;
2368}
2369
2370static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2371 "Link test (on/offline)",
2372 "Eeprom test (on/offline)",
2373 "Self test (offline)",
2374 "Mac loopback (offline)",
2375 "Phy loopback (offline)",
2376};
4c3616cd 2377#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
1da177e4 2378
2379static void e100_diag_test(struct net_device *netdev,
2380 struct ethtool_test *test, u64 *data)
2381{
2382 struct ethtool_cmd cmd;
2383 struct nic *nic = netdev_priv(netdev);
2384 int i, err;
2385
2386 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2387 data[0] = !mii_link_ok(&nic->mii);
2388 data[1] = e100_eeprom_load(nic);
f26251eb 2389 if (test->flags & ETH_TEST_FL_OFFLINE) {
2390
2391 /* save speed, duplex & autoneg settings */
2392 err = mii_ethtool_gset(&nic->mii, &cmd);
2393
f26251eb 2394 if (netif_running(netdev))
1da177e4
LT
2395 e100_down(nic);
2396 data[2] = e100_self_test(nic);
2397 data[3] = e100_loopback_test(nic, lb_mac);
2398 data[4] = e100_loopback_test(nic, lb_phy);
2399
2400 /* restore speed, duplex & autoneg settings */
2401 err = mii_ethtool_sset(&nic->mii, &cmd);
2402
f26251eb 2403 if (netif_running(netdev))
1da177e4
LT
2404 e100_up(nic);
2405 }
f26251eb 2406 for (i = 0; i < E100_TEST_LEN; i++)
1da177e4 2407 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2408
2409 msleep_interruptible(4 * 1000);
2410}
2411
2412static int e100_phys_id(struct net_device *netdev, u32 data)
2413{
2414 struct nic *nic = netdev_priv(netdev);
2415 u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
2416 MII_LED_CONTROL;
1da177e4 2417
f26251eb 2418 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2419 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
2420 mod_timer(&nic->blink_timer, jiffies);
2421 msleep_interruptible(data * 1000);
2422 del_timer_sync(&nic->blink_timer);
b55de80e 2423 mdio_write(netdev, nic->mii.phy_id, led_reg, 0);
2424
2425 return 0;
2426}
2427
2428static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2429 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2430 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2431 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2432 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2433 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2434 "tx_heartbeat_errors", "tx_window_errors",
2435 /* device-specific stats */
2436 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2437 "tx_flow_control_pause", "rx_flow_control_pause",
2438 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2439};
2440#define E100_NET_STATS_LEN 21
4c3616cd 2441#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
1da177e4 2442
b9f2c044 2443static int e100_get_sset_count(struct net_device *netdev, int sset)
1da177e4 2444{
2445 switch (sset) {
2446 case ETH_SS_TEST:
2447 return E100_TEST_LEN;
2448 case ETH_SS_STATS:
2449 return E100_STATS_LEN;
2450 default:
2451 return -EOPNOTSUPP;
2452 }
2453}
2454
2455static void e100_get_ethtool_stats(struct net_device *netdev,
2456 struct ethtool_stats *stats, u64 *data)
2457{
2458 struct nic *nic = netdev_priv(netdev);
2459 int i;
2460
f26251eb 2461 for (i = 0; i < E100_NET_STATS_LEN; i++)
09f75cd7 2462 data[i] = ((unsigned long *)&netdev->stats)[i];
2463
2464 data[i++] = nic->tx_deferred;
2465 data[i++] = nic->tx_single_collisions;
2466 data[i++] = nic->tx_multiple_collisions;
2467 data[i++] = nic->tx_fc_pause;
2468 data[i++] = nic->rx_fc_pause;
2469 data[i++] = nic->rx_fc_unsupported;
2470 data[i++] = nic->tx_tco_frames;
2471 data[i++] = nic->rx_tco_frames;
2472}
2473
2474static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2475{
f26251eb 2476 switch (stringset) {
2477 case ETH_SS_TEST:
2478 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2479 break;
2480 case ETH_SS_STATS:
2481 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2482 break;
2483 }
2484}
2485
7282d491 2486static const struct ethtool_ops e100_ethtool_ops = {
2487 .get_settings = e100_get_settings,
2488 .set_settings = e100_set_settings,
2489 .get_drvinfo = e100_get_drvinfo,
2490 .get_regs_len = e100_get_regs_len,
2491 .get_regs = e100_get_regs,
2492 .get_wol = e100_get_wol,
2493 .set_wol = e100_set_wol,
2494 .get_msglevel = e100_get_msglevel,
2495 .set_msglevel = e100_set_msglevel,
2496 .nway_reset = e100_nway_reset,
2497 .get_link = e100_get_link,
2498 .get_eeprom_len = e100_get_eeprom_len,
2499 .get_eeprom = e100_get_eeprom,
2500 .set_eeprom = e100_set_eeprom,
2501 .get_ringparam = e100_get_ringparam,
2502 .set_ringparam = e100_set_ringparam,
2503 .self_test = e100_diag_test,
2504 .get_strings = e100_get_strings,
2505 .phys_id = e100_phys_id,
1da177e4 2506 .get_ethtool_stats = e100_get_ethtool_stats,
b9f2c044 2507 .get_sset_count = e100_get_sset_count,
2508};
2509
2510static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2511{
2512 struct nic *nic = netdev_priv(netdev);
2513
2514 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2515}
2516
2517static int e100_alloc(struct nic *nic)
2518{
2519 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2520 &nic->dma_addr);
2521 return nic->mem ? 0 : -ENOMEM;
2522}
2523
2524static void e100_free(struct nic *nic)
2525{
f26251eb 2526 if (nic->mem) {
2527 pci_free_consistent(nic->pdev, sizeof(struct mem),
2528 nic->mem, nic->dma_addr);
2529 nic->mem = NULL;
2530 }
2531}
2532
2533static int e100_open(struct net_device *netdev)
2534{
2535 struct nic *nic = netdev_priv(netdev);
2536 int err = 0;
2537
2538 netif_carrier_off(netdev);
f26251eb 2539 if ((err = e100_up(nic)))
2540 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2541 return err;
2542}
2543
2544static int e100_close(struct net_device *netdev)
2545{
2546 e100_down(netdev_priv(netdev));
2547 return 0;
2548}
2549
2550static const struct net_device_ops e100_netdev_ops = {
2551 .ndo_open = e100_open,
2552 .ndo_stop = e100_close,
00829823 2553 .ndo_start_xmit = e100_xmit_frame,
2554 .ndo_validate_addr = eth_validate_addr,
2555 .ndo_set_multicast_list = e100_set_multicast_list,
2556 .ndo_set_mac_address = e100_set_mac_address,
2557 .ndo_change_mtu = e100_change_mtu,
2558 .ndo_do_ioctl = e100_do_ioctl,
2559 .ndo_tx_timeout = e100_tx_timeout,
2560#ifdef CONFIG_NET_POLL_CONTROLLER
2561 .ndo_poll_controller = e100_netpoll,
2562#endif
2563};
2564
2565static int __devinit e100_probe(struct pci_dev *pdev,
2566 const struct pci_device_id *ent)
2567{
2568 struct net_device *netdev;
2569 struct nic *nic;
2570 int err;
2571
2572 if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2573 if (((1 << debug) - 1) & NETIF_MSG_PROBE)
2574 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
2575 return -ENOMEM;
2576 }
2577
acc78426 2578 netdev->netdev_ops = &e100_netdev_ops;
1da177e4 2579 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
1da177e4 2580 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
0eb5a34c 2581 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2582
2583 nic = netdev_priv(netdev);
bea3348e 2584 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
2585 nic->netdev = netdev;
2586 nic->pdev = pdev;
2587 nic->msg_enable = (1 << debug) - 1;
2588 pci_set_drvdata(pdev, netdev);
2589
f26251eb 2590 if ((err = pci_enable_device(pdev))) {
2591 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
2592 goto err_out_free_dev;
2593 }
2594
f26251eb 2595 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2596 DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
2597 "base address, aborting.\n");
2598 err = -ENODEV;
2599 goto err_out_disable_pdev;
2600 }
2601
f26251eb 2602 if ((err = pci_request_regions(pdev, DRV_NAME))) {
2603 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
2604 goto err_out_disable_pdev;
2605 }
2606
284901a9 2607 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
2608 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
2609 goto err_out_free_res;
2610 }
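/* The point of this commit: the old DMA_32BIT_MASK constant is
 * replaced by the generic DMA_BIT_MASK(n) macro from
 * <linux/dma-mapping.h>, quoted below; for n == 32 its expansion is
 * bit-identical to the constant it replaces. */
/* #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1)) */
/* DMA_BIT_MASK(32) == 0x00000000ffffffffULL */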
2611
2612 SET_NETDEV_DEV(netdev, &pdev->dev);
2613
2614 if (use_io)
2615 DPRINTK(PROBE, INFO, "using i/o access mode\n");
2616
2617 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
f26251eb 2618 if (!nic->csr) {
2619 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
2620 err = -ENOMEM;
2621 goto err_out_free_res;
2622 }
2623
f26251eb 2624 if (ent->driver_data)
2625 nic->flags |= ich;
2626 else
2627 nic->flags &= ~ich;
2628
2629 e100_get_defaults(nic);
2630
1f53367d 2631 /* locks must be initialized before calling hw_reset */
2632 spin_lock_init(&nic->cb_lock);
2633 spin_lock_init(&nic->cmd_lock);
ac7c6669 2634 spin_lock_init(&nic->mdio_lock);
1da177e4
LT
2635
2636 /* Reset the device before pci_set_master() in case device is in some
2637 * funky state and has an interrupt pending - hint: we don't have the
2638 * interrupt handler registered yet. */
2639 e100_hw_reset(nic);
2640
2641 pci_set_master(pdev);
2642
2643 init_timer(&nic->watchdog);
2644 nic->watchdog.function = e100_watchdog;
2645 nic->watchdog.data = (unsigned long)nic;
2646 init_timer(&nic->blink_timer);
2647 nic->blink_timer.function = e100_blink_led;
2648 nic->blink_timer.data = (unsigned long)nic;
2649
c4028958 2650 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2acdb1e0 2651
f26251eb 2652 if ((err = e100_alloc(nic))) {
2653 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2654 goto err_out_iounmap;
2655 }
2656
f26251eb 2657 if ((err = e100_eeprom_load(nic)))
2658 goto err_out_free;
2659
2660 e100_phy_init(nic);
2661
1da177e4 2662 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
a92dd923 2663 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
2664 if (!is_valid_ether_addr(netdev->perm_addr)) {
2665 if (!eeprom_bad_csum_allow) {
2666 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2667 "EEPROM, aborting.\n");
2668 err = -EAGAIN;
2669 goto err_out_free;
2670 } else {
2671 DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
2672 "you MUST configure one.\n");
2673 }
2674 }
2675
2676 /* Wol magic packet can be enabled from eeprom */
f26251eb 2677 if ((nic->mac >= mac_82558_D101_A4) &&
bc79fc84 2678 (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
1da177e4 2679 nic->flags |= wol_magic;
2680 device_set_wakeup_enable(&pdev->dev, true);
2681 }
1da177e4 2682
6bdacb1a 2683 /* ack any pending wake events, disable PME */
e7272403 2684 pci_pme_active(pdev, false);
2685
2686 strcpy(netdev->name, "eth%d");
f26251eb 2687 if ((err = register_netdev(netdev))) {
2688 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2689 goto err_out_free;
2690 }
2691
e174961c 2692 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
0795af57 2693 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
e174961c 2694 pdev->irq, netdev->dev_addr);
2695
2696 return 0;
2697
2698err_out_free:
2699 e100_free(nic);
2700err_out_iounmap:
27345bb6 2701 pci_iounmap(pdev, nic->csr);
2702err_out_free_res:
2703 pci_release_regions(pdev);
2704err_out_disable_pdev:
2705 pci_disable_device(pdev);
2706err_out_free_dev:
2707 pci_set_drvdata(pdev, NULL);
2708 free_netdev(netdev);
2709 return err;
2710}
2711
2712static void __devexit e100_remove(struct pci_dev *pdev)
2713{
2714 struct net_device *netdev = pci_get_drvdata(pdev);
2715
f26251eb 2716 if (netdev) {
2717 struct nic *nic = netdev_priv(netdev);
2718 unregister_netdev(netdev);
2719 e100_free(nic);
915e91d7 2720 pci_iounmap(pdev, nic->csr);
2721 free_netdev(netdev);
2722 pci_release_regions(pdev);
2723 pci_disable_device(pdev);
2724 pci_set_drvdata(pdev, NULL);
2725 }
2726}
2727
2728#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
2729#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
2730#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
2731static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2732{
2733 struct net_device *netdev = pci_get_drvdata(pdev);
2734 struct nic *nic = netdev_priv(netdev);
2735
824545e7 2736 if (netif_running(netdev))
f902283b 2737 e100_down(nic);
518d8338 2738 netif_device_detach(netdev);
a53a33da 2739
1da177e4 2740 pci_save_state(pdev);
2741
2742 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2743 /* enable reverse auto-negotiation */
2744 if (nic->phy == phy_82552_v) {
2745 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
2746 E100_82552_SMARTSPEED);
2747
2748 mdio_write(netdev, nic->mii.phy_id,
2749 E100_82552_SMARTSPEED, smartspeed |
2750 E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
2751 }
2752 if (pci_enable_wake(pdev, PCI_D3cold, true))
2753 pci_enable_wake(pdev, PCI_D3hot, true);
e8e82b76 2754 } else {
bc79fc84 2755 pci_enable_wake(pdev, PCI_D3hot, false);
e8e82b76 2756 }
975b366a 2757
8543da66 2758 pci_disable_device(pdev);
e8e82b76 2759 pci_set_power_state(pdev, PCI_D3hot);
2760
2761 return 0;
2762}
2763
f902283b 2764#ifdef CONFIG_PM
2765static int e100_resume(struct pci_dev *pdev)
2766{
2767 struct net_device *netdev = pci_get_drvdata(pdev);
2768 struct nic *nic = netdev_priv(netdev);
2769
975b366a 2770 pci_set_power_state(pdev, PCI_D0);
1da177e4 2771 pci_restore_state(pdev);
6bdacb1a 2772 /* ack any pending wake events, disable PME */
975b366a 2773 pci_enable_wake(pdev, 0, 0);
1da177e4 2774
2775 /* disable reverse auto-negotiation */
2776 if (nic->phy == phy_82552_v) {
2777 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
2778 E100_82552_SMARTSPEED);
2779
2780 mdio_write(netdev, nic->mii.phy_id,
2781 E100_82552_SMARTSPEED,
2782 smartspeed & ~(E100_82552_REV_ANEG));
2783 }
2784
1da177e4 2785 netif_device_attach(netdev);
975b366a 2786 if (netif_running(netdev))
2787 e100_up(nic);
2788
2789 return 0;
2790}
975b366a 2791#endif /* CONFIG_PM */
1da177e4 2792
d18c3db5 2793static void e100_shutdown(struct pci_dev *pdev)
6bdacb1a 2794{
f902283b 2795 e100_suspend(pdev, PMSG_SUSPEND);
2796}
2797
2798/* ------------------ PCI Error Recovery infrastructure -------------- */
2799/**
2800 * e100_io_error_detected - called when PCI error is detected.
2801 * @pdev: Pointer to PCI device
0a0863af 2802 * @state: The current pci connection state
2803 */
2804static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2805{
2806 struct net_device *netdev = pci_get_drvdata(pdev);
bea3348e 2807 struct nic *nic = netdev_priv(netdev);
2cc30492 2808
0a0863af 2809 /* Similar to calling e100_down(), but avoids adapter I/O. */
acc78426 2810 e100_close(netdev);
2cc30492 2811
0a0863af 2812 /* Detach; put netif into a state similar to hotplug unplug. */
bea3348e 2813 napi_enable(&nic->napi);
2cc30492 2814 netif_device_detach(netdev);
b1d26f24 2815 pci_disable_device(pdev);
2816
2817 /* Request a slot reset. */
2818 return PCI_ERS_RESULT_NEED_RESET;
2819}
2820
2821/**
2822 * e100_io_slot_reset - called after the pci bus has been reset.
2823 * @pdev: Pointer to PCI device
2824 *
2825 * Restart the card from scratch.
2826 */
2827static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
2828{
2829 struct net_device *netdev = pci_get_drvdata(pdev);
2830 struct nic *nic = netdev_priv(netdev);
2831
2832 if (pci_enable_device(pdev)) {
2833 printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
2834 return PCI_ERS_RESULT_DISCONNECT;
2835 }
2836 pci_set_master(pdev);
2837
2838 /* Only one device per card can do a reset */
2839 if (0 != PCI_FUNC(pdev->devfn))
2840 return PCI_ERS_RESULT_RECOVERED;
2841 e100_hw_reset(nic);
2842 e100_phy_init(nic);
2843
2844 return PCI_ERS_RESULT_RECOVERED;
2845}
2846
2847/**
2848 * e100_io_resume - resume normal operations
2849 * @pdev: Pointer to PCI device
2850 *
2851 * Resume normal operations after an error recovery
2852 * sequence has been completed.
2853 */
2854static void e100_io_resume(struct pci_dev *pdev)
2855{
2856 struct net_device *netdev = pci_get_drvdata(pdev);
2857 struct nic *nic = netdev_priv(netdev);
2858
2859 /* ack any pending wake events, disable PME */
2860 pci_enable_wake(pdev, 0, 0);
2861
2862 netif_device_attach(netdev);
2863 if (netif_running(netdev)) {
2864 e100_open(netdev);
2865 mod_timer(&nic->watchdog, jiffies);
2866 }
2867}
2868
2869static struct pci_error_handlers e100_err_handler = {
2870 .error_detected = e100_io_error_detected,
2871 .slot_reset = e100_io_slot_reset,
2872 .resume = e100_io_resume,
2873};
6bdacb1a 2874
2875static struct pci_driver e100_driver = {
2876 .name = DRV_NAME,
2877 .id_table = e100_id_table,
2878 .probe = e100_probe,
2879 .remove = __devexit_p(e100_remove),
e8e82b76 2880#ifdef CONFIG_PM
975b366a 2881 /* Power Management hooks */
2882 .suspend = e100_suspend,
2883 .resume = e100_resume,
2884#endif
05479938 2885 .shutdown = e100_shutdown,
2cc30492 2886 .err_handler = &e100_err_handler,
2887};
2888
2889static int __init e100_init_module(void)
2890{
f26251eb 2891 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
2892 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2893 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
2894 }
29917620 2895 return pci_register_driver(&e100_driver);
2896}
2897
2898static void __exit e100_cleanup_module(void)
2899{
2900 pci_unregister_driver(&e100_driver);
2901}
2902
2903module_init(e100_init_module);
2904module_exit(e100_cleanup_module);