bonding: ARP monitoring broken on x86_64
[deliverable/linux.git] / drivers / net / e100.c
CommitLineData
1da177e4
LT
1/*******************************************************************************
2
0abb6eb1
AK
3 Intel PRO/100 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
05479938
JB
5
6 This program is free software; you can redistribute it and/or modify it
0abb6eb1
AK
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
05479938 9
0abb6eb1 10 This program is distributed in the hope it will be useful, but WITHOUT
05479938
JB
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1da177e4 13 more details.
05479938 14
1da177e4 15 You should have received a copy of the GNU General Public License along with
0abb6eb1
AK
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
05479938 18
0abb6eb1
AK
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
05479938 21
1da177e4
LT
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
0abb6eb1 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
1da177e4
LT
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/*
30 * e100.c: Intel(R) PRO/100 ethernet driver
31 *
32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
35 *
36 * References:
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
40 *
41 *
42 * Theory of Operation
43 *
44 * I. General
45 *
46 * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at 33Mhz PCI clock rate.
54 *
55 * II. Driver Operation
56 *
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
63 *
64 * 8255x is highly MII-compliant and all access to the PHY go
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
67 * devices.
68 *
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
72 *
73 * III. Transmit
74 *
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
 79 * controller, and the controller can be restarted by issuing a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
82 *
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
86 *
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB clean
90 * up happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of number of free CB resources available.
92 *
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
95 * with 00h.
96 *
 97 * IV. Receive
98 *
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
108 *
109 * Under typical operation, the receive unit (RU) is start once,
110 * and the controller happily fills RFDs as frames arrive. If
111 * replacement RFDs cannot be allocated, or the RU goes non-active,
112 * the RU must be restarted. Frame arrival generates an interrupt,
113 * and Rx indication and re-allocation happen in the same context,
114 * therefore no locking is required. A software-generated interrupt
115 * is generated from the watchdog to recover from a failed allocation
 116 * scenario where all Rx resources have been indicated and none re-
117 * placed.
118 *
119 * V. Miscellaneous
120 *
121 * VLAN offloading of tagging, stripping and filtering is not
122 * supported, but driver will accommodate the extra 4-byte VLAN tag
123 * for processing by upper layers. Tx/Rx Checksum offloading is not
124 * supported. Tx Scatter/Gather is not supported. Jumbo Frames is
125 * not supported (hardware limitation).
126 *
127 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
128 *
129 * Thanks to JC (jchapman@katalix.com) for helping with
130 * testing/troubleshooting the development driver.
131 *
132 * TODO:
133 * o several entry points race with dev->close
134 * o check for tx-no-resources/stop Q races with tx clean/wake Q
ac7c6669
OM
135 *
136 * FIXES:
137 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
138 * - Stratus87247: protect MDI control register manipulations
1da177e4
LT
139 */
140
1da177e4
LT
141#include <linux/module.h>
142#include <linux/moduleparam.h>
143#include <linux/kernel.h>
144#include <linux/types.h>
145#include <linux/slab.h>
146#include <linux/delay.h>
147#include <linux/init.h>
148#include <linux/pci.h>
1e7f0bd8 149#include <linux/dma-mapping.h>
1da177e4
LT
150#include <linux/netdevice.h>
151#include <linux/etherdevice.h>
152#include <linux/mii.h>
153#include <linux/if_vlan.h>
154#include <linux/skbuff.h>
155#include <linux/ethtool.h>
156#include <linux/string.h>
157#include <asm/unaligned.h>
158
159
160#define DRV_NAME "e100"
4e1dc97d 161#define DRV_EXT "-NAPI"
76ddb3fd 162#define DRV_VERSION "3.5.17-k2"DRV_EXT
1da177e4 163#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
4e1dc97d 164#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
1da177e4
LT
165#define PFX DRV_NAME ": "
166
167#define E100_WATCHDOG_PERIOD (2 * HZ)
168#define E100_NAPI_WEIGHT 16
169
170MODULE_DESCRIPTION(DRV_DESCRIPTION);
171MODULE_AUTHOR(DRV_COPYRIGHT);
172MODULE_LICENSE("GPL");
173MODULE_VERSION(DRV_VERSION);
174
175static int debug = 3;
8fb6f732 176static int eeprom_bad_csum_allow = 0;
1da177e4 177module_param(debug, int, 0);
8fb6f732 178module_param(eeprom_bad_csum_allow, int, 0);
1da177e4 179MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
8fb6f732 180MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
1da177e4
LT
181#define DPRINTK(nlevel, klevel, fmt, args...) \
182 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
183 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
184 __FUNCTION__ , ## args))
185
186#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
187 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
188 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
189static struct pci_device_id e100_id_table[] = {
190 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
191 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
192 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
193 INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
194 INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
195 INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
196 INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
197 INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
198 INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
199 INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
200 INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
201 INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
202 INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
203 INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
204 INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
205 INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
206 INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
207 INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
208 INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
209 INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
210 INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
211 INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
212 INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
213 INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
214 INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
215 INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
216 INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
217 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
218 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
219 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
042e2fb7
MC
220 INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
221 INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
222 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
223 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
224 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
1da177e4
LT
225 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
226 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
227 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
228 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
229 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
042e2fb7 230 INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
1da177e4
LT
231 { 0, }
232};
233MODULE_DEVICE_TABLE(pci, e100_id_table);
234
235enum mac {
236 mac_82557_D100_A = 0,
237 mac_82557_D100_B = 1,
238 mac_82557_D100_C = 2,
239 mac_82558_D101_A4 = 4,
240 mac_82558_D101_B0 = 5,
241 mac_82559_D101M = 8,
242 mac_82559_D101S = 9,
243 mac_82550_D102 = 12,
244 mac_82550_D102_C = 13,
245 mac_82551_E = 14,
246 mac_82551_F = 15,
247 mac_82551_10 = 16,
248 mac_unknown = 0xFF,
249};
250
251enum phy {
252 phy_100a = 0x000003E0,
253 phy_100c = 0x035002A8,
254 phy_82555_tx = 0x015002A8,
255 phy_nsc_tx = 0x5C002000,
256 phy_82562_et = 0x033002A8,
257 phy_82562_em = 0x032002A8,
258 phy_82562_ek = 0x031002A8,
259 phy_82562_eh = 0x017002A8,
260 phy_unknown = 0xFFFFFFFF,
261};
262
263/* CSR (Control/Status Registers) */
264struct csr {
265 struct {
266 u8 status;
267 u8 stat_ack;
268 u8 cmd_lo;
269 u8 cmd_hi;
270 u32 gen_ptr;
271 } scb;
272 u32 port;
273 u16 flash_ctrl;
274 u8 eeprom_ctrl_lo;
275 u8 eeprom_ctrl_hi;
276 u32 mdi_ctrl;
277 u32 rx_dma_count;
278};
279
280enum scb_status {
281 rus_ready = 0x10,
282 rus_mask = 0x3C,
283};
284
1f53367d
MC
285enum ru_state {
286 RU_SUSPENDED = 0,
287 RU_RUNNING = 1,
288 RU_UNINITIALIZED = -1,
289};
290
1da177e4
LT
291enum scb_stat_ack {
292 stat_ack_not_ours = 0x00,
293 stat_ack_sw_gen = 0x04,
294 stat_ack_rnr = 0x10,
295 stat_ack_cu_idle = 0x20,
296 stat_ack_frame_rx = 0x40,
297 stat_ack_cu_cmd_done = 0x80,
298 stat_ack_not_present = 0xFF,
299 stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
300 stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
301};
302
303enum scb_cmd_hi {
304 irq_mask_none = 0x00,
305 irq_mask_all = 0x01,
306 irq_sw_gen = 0x02,
307};
308
309enum scb_cmd_lo {
310 cuc_nop = 0x00,
311 ruc_start = 0x01,
312 ruc_load_base = 0x06,
313 cuc_start = 0x10,
314 cuc_resume = 0x20,
315 cuc_dump_addr = 0x40,
316 cuc_dump_stats = 0x50,
317 cuc_load_base = 0x60,
318 cuc_dump_reset = 0x70,
319};
320
321enum cuc_dump {
322 cuc_dump_complete = 0x0000A005,
323 cuc_dump_reset_complete = 0x0000A007,
324};
05479938 325
1da177e4
LT
326enum port {
327 software_reset = 0x0000,
328 selftest = 0x0001,
329 selective_reset = 0x0002,
330};
331
332enum eeprom_ctrl_lo {
333 eesk = 0x01,
334 eecs = 0x02,
335 eedi = 0x04,
336 eedo = 0x08,
337};
338
339enum mdi_ctrl {
340 mdi_write = 0x04000000,
341 mdi_read = 0x08000000,
342 mdi_ready = 0x10000000,
343};
344
345enum eeprom_op {
346 op_write = 0x05,
347 op_read = 0x06,
348 op_ewds = 0x10,
349 op_ewen = 0x13,
350};
351
352enum eeprom_offsets {
353 eeprom_cnfg_mdix = 0x03,
354 eeprom_id = 0x0A,
355 eeprom_config_asf = 0x0D,
356 eeprom_smbus_addr = 0x90,
357};
358
359enum eeprom_cnfg_mdix {
360 eeprom_mdix_enabled = 0x0080,
361};
362
363enum eeprom_id {
364 eeprom_id_wol = 0x0020,
365};
366
367enum eeprom_config_asf {
368 eeprom_asf = 0x8000,
369 eeprom_gcl = 0x4000,
370};
371
372enum cb_status {
373 cb_complete = 0x8000,
374 cb_ok = 0x2000,
375};
376
377enum cb_command {
378 cb_nop = 0x0000,
379 cb_iaaddr = 0x0001,
380 cb_config = 0x0002,
381 cb_multi = 0x0003,
382 cb_tx = 0x0004,
383 cb_ucode = 0x0005,
384 cb_dump = 0x0006,
385 cb_tx_sf = 0x0008,
386 cb_cid = 0x1f00,
387 cb_i = 0x2000,
388 cb_s = 0x4000,
389 cb_el = 0x8000,
390};
391
392struct rfd {
393 u16 status;
394 u16 command;
395 u32 link;
396 u32 rbd;
397 u16 actual_size;
398 u16 size;
399};
400
401struct rx {
402 struct rx *next, *prev;
403 struct sk_buff *skb;
404 dma_addr_t dma_addr;
405};
406
407#if defined(__BIG_ENDIAN_BITFIELD)
408#define X(a,b) b,a
409#else
410#define X(a,b) a,b
411#endif
412struct config {
413/*0*/ u8 X(byte_count:6, pad0:2);
414/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
415/*2*/ u8 adaptive_ifs;
416/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
417 term_write_cache_line:1), pad3:4);
418/*4*/ u8 X(rx_dma_max_count:7, pad4:1);
419/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
420/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
421 tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
422 rx_discard_overruns:1), rx_save_bad_frames:1);
423/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
424 pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
425 tx_dynamic_tbd:1);
426/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
427/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
428 link_status_wake:1), arp_wake:1), mcmatch_wake:1);
429/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
430 loopback:2);
431/*11*/ u8 X(linear_priority:3, pad11:5);
432/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
433/*13*/ u8 ip_addr_lo;
434/*14*/ u8 ip_addr_hi;
435/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
436 wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
437 pad15_2:1), crs_or_cdt:1);
438/*16*/ u8 fc_delay_lo;
439/*17*/ u8 fc_delay_hi;
440/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
441 rx_long_ok:1), fc_priority_threshold:3), pad18:1);
442/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
443 fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
444 full_duplex_force:1), full_duplex_pin:1);
445/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
446/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
447/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
448 u8 pad_d102[9];
449};
450
451#define E100_MAX_MULTICAST_ADDRS 64
452struct multi {
453 u16 count;
454 u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
455};
456
457/* Important: keep total struct u32-aligned */
458#define UCODE_SIZE 134
459struct cb {
460 u16 status;
461 u16 command;
462 u32 link;
463 union {
464 u8 iaaddr[ETH_ALEN];
465 u32 ucode[UCODE_SIZE];
466 struct config config;
467 struct multi multi;
468 struct {
469 u32 tbd_array;
470 u16 tcb_byte_count;
471 u8 threshold;
472 u8 tbd_count;
473 struct {
474 u32 buf_addr;
475 u16 size;
476 u16 eol;
477 } tbd;
478 } tcb;
479 u32 dump_buffer_addr;
480 } u;
481 struct cb *next, *prev;
482 dma_addr_t dma_addr;
483 struct sk_buff *skb;
484};
485
486enum loopback {
487 lb_none = 0, lb_mac = 1, lb_phy = 3,
488};
489
490struct stats {
491 u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
492 tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
493 tx_multiple_collisions, tx_total_collisions;
494 u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
495 rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
496 rx_short_frame_errors;
497 u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
498 u16 xmt_tco_frames, rcv_tco_frames;
499 u32 complete;
500};
501
502struct mem {
503 struct {
504 u32 signature;
505 u32 result;
506 } selftest;
507 struct stats stats;
508 u8 dump_buf[596];
509};
510
511struct param_range {
512 u32 min;
513 u32 max;
514 u32 count;
515};
516
517struct params {
518 struct param_range rfds;
519 struct param_range cbs;
520};
521
522struct nic {
523 /* Begin: frequently used values: keep adjacent for cache effect */
524 u32 msg_enable ____cacheline_aligned;
525 struct net_device *netdev;
526 struct pci_dev *pdev;
527
528 struct rx *rxs ____cacheline_aligned;
529 struct rx *rx_to_use;
530 struct rx *rx_to_clean;
531 struct rfd blank_rfd;
1f53367d 532 enum ru_state ru_running;
1da177e4
LT
533
534 spinlock_t cb_lock ____cacheline_aligned;
535 spinlock_t cmd_lock;
536 struct csr __iomem *csr;
537 enum scb_cmd_lo cuc_cmd;
538 unsigned int cbs_avail;
539 struct cb *cbs;
540 struct cb *cb_to_use;
541 struct cb *cb_to_send;
542 struct cb *cb_to_clean;
543 u16 tx_command;
544 /* End: frequently used values: keep adjacent for cache effect */
545
546 enum {
547 ich = (1 << 0),
548 promiscuous = (1 << 1),
549 multicast_all = (1 << 2),
550 wol_magic = (1 << 3),
551 ich_10h_workaround = (1 << 4),
552 } flags ____cacheline_aligned;
553
554 enum mac mac;
555 enum phy phy;
556 struct params params;
557 struct net_device_stats net_stats;
558 struct timer_list watchdog;
559 struct timer_list blink_timer;
560 struct mii_if_info mii;
2acdb1e0 561 struct work_struct tx_timeout_task;
1da177e4
LT
562 enum loopback loopback;
563
564 struct mem *mem;
565 dma_addr_t dma_addr;
566
567 dma_addr_t cbs_dma_addr;
568 u8 adaptive_ifs;
569 u8 tx_threshold;
570 u32 tx_frames;
571 u32 tx_collisions;
572 u32 tx_deferred;
573 u32 tx_single_collisions;
574 u32 tx_multiple_collisions;
575 u32 tx_fc_pause;
576 u32 tx_tco_frames;
577
578 u32 rx_fc_pause;
579 u32 rx_fc_unsupported;
580 u32 rx_tco_frames;
581 u32 rx_over_length_errors;
582
583 u8 rev_id;
584 u16 leds;
585 u16 eeprom_wc;
586 u16 eeprom[256];
ac7c6669 587 spinlock_t mdio_lock;
1da177e4
LT
588};
589
590static inline void e100_write_flush(struct nic *nic)
591{
592 /* Flush previous PCI writes through intermediate bridges
593 * by doing a benign read */
594 (void)readb(&nic->csr->scb.status);
595}
596
858119e1 597static void e100_enable_irq(struct nic *nic)
1da177e4
LT
598{
599 unsigned long flags;
600
601 spin_lock_irqsave(&nic->cmd_lock, flags);
602 writeb(irq_mask_none, &nic->csr->scb.cmd_hi);
1da177e4 603 e100_write_flush(nic);
ad8c48ad 604 spin_unlock_irqrestore(&nic->cmd_lock, flags);
1da177e4
LT
605}
606
858119e1 607static void e100_disable_irq(struct nic *nic)
1da177e4
LT
608{
609 unsigned long flags;
610
611 spin_lock_irqsave(&nic->cmd_lock, flags);
612 writeb(irq_mask_all, &nic->csr->scb.cmd_hi);
1da177e4 613 e100_write_flush(nic);
ad8c48ad 614 spin_unlock_irqrestore(&nic->cmd_lock, flags);
1da177e4
LT
615}
616
617static void e100_hw_reset(struct nic *nic)
618{
619 /* Put CU and RU into idle with a selective reset to get
620 * device off of PCI bus */
621 writel(selective_reset, &nic->csr->port);
622 e100_write_flush(nic); udelay(20);
623
624 /* Now fully reset device */
625 writel(software_reset, &nic->csr->port);
626 e100_write_flush(nic); udelay(20);
627
628 /* Mask off our interrupt line - it's unmasked after reset */
629 e100_disable_irq(nic);
630}
631
632static int e100_self_test(struct nic *nic)
633{
634 u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
635
636 /* Passing the self-test is a pretty good indication
637 * that the device can DMA to/from host memory */
638
639 nic->mem->selftest.signature = 0;
640 nic->mem->selftest.result = 0xFFFFFFFF;
641
642 writel(selftest | dma_addr, &nic->csr->port);
643 e100_write_flush(nic);
644 /* Wait 10 msec for self-test to complete */
645 msleep(10);
646
647 /* Interrupts are enabled after self-test */
648 e100_disable_irq(nic);
649
650 /* Check results of self-test */
651 if(nic->mem->selftest.result != 0) {
652 DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
653 nic->mem->selftest.result);
654 return -ETIMEDOUT;
655 }
656 if(nic->mem->selftest.signature == 0) {
657 DPRINTK(HW, ERR, "Self-test failed: timed out\n");
658 return -ETIMEDOUT;
659 }
660
661 return 0;
662}
663
664static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
665{
666 u32 cmd_addr_data[3];
667 u8 ctrl;
668 int i, j;
669
670 /* Three cmds: write/erase enable, write data, write/erase disable */
671 cmd_addr_data[0] = op_ewen << (addr_len - 2);
672 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
673 cpu_to_le16(data);
674 cmd_addr_data[2] = op_ewds << (addr_len - 2);
675
676 /* Bit-bang cmds to write word to eeprom */
677 for(j = 0; j < 3; j++) {
678
679 /* Chip select */
680 writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
681 e100_write_flush(nic); udelay(4);
682
683 for(i = 31; i >= 0; i--) {
684 ctrl = (cmd_addr_data[j] & (1 << i)) ?
685 eecs | eedi : eecs;
686 writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
687 e100_write_flush(nic); udelay(4);
688
689 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
690 e100_write_flush(nic); udelay(4);
691 }
692 /* Wait 10 msec for cmd to complete */
693 msleep(10);
694
695 /* Chip deselect */
696 writeb(0, &nic->csr->eeprom_ctrl_lo);
697 e100_write_flush(nic); udelay(4);
698 }
699};
700
701/* General technique stolen from the eepro100 driver - very clever */
702static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
703{
704 u32 cmd_addr_data;
705 u16 data = 0;
706 u8 ctrl;
707 int i;
708
709 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
710
711 /* Chip select */
712 writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
713 e100_write_flush(nic); udelay(4);
714
715 /* Bit-bang to read word from eeprom */
716 for(i = 31; i >= 0; i--) {
717 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
718 writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
719 e100_write_flush(nic); udelay(4);
05479938 720
1da177e4
LT
721 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
722 e100_write_flush(nic); udelay(4);
05479938 723
1da177e4
LT
724 /* Eeprom drives a dummy zero to EEDO after receiving
725 * complete address. Use this to adjust addr_len. */
726 ctrl = readb(&nic->csr->eeprom_ctrl_lo);
727 if(!(ctrl & eedo) && i > 16) {
728 *addr_len -= (i - 16);
729 i = 17;
730 }
05479938 731
1da177e4
LT
732 data = (data << 1) | (ctrl & eedo ? 1 : 0);
733 }
734
735 /* Chip deselect */
736 writeb(0, &nic->csr->eeprom_ctrl_lo);
737 e100_write_flush(nic); udelay(4);
738
739 return le16_to_cpu(data);
740};
741
742/* Load entire EEPROM image into driver cache and validate checksum */
743static int e100_eeprom_load(struct nic *nic)
744{
745 u16 addr, addr_len = 8, checksum = 0;
746
747 /* Try reading with an 8-bit addr len to discover actual addr len */
748 e100_eeprom_read(nic, &addr_len, 0);
749 nic->eeprom_wc = 1 << addr_len;
750
751 for(addr = 0; addr < nic->eeprom_wc; addr++) {
752 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
753 if(addr < nic->eeprom_wc - 1)
754 checksum += cpu_to_le16(nic->eeprom[addr]);
755 }
756
757 /* The checksum, stored in the last word, is calculated such that
758 * the sum of words should be 0xBABA */
759 checksum = le16_to_cpu(0xBABA - checksum);
760 if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
761 DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
8fb6f732
DM
762 if (!eeprom_bad_csum_allow)
763 return -EAGAIN;
1da177e4
LT
764 }
765
766 return 0;
767}
768
769/* Save (portion of) driver EEPROM cache to device and update checksum */
770static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
771{
772 u16 addr, addr_len = 8, checksum = 0;
773
774 /* Try reading with an 8-bit addr len to discover actual addr len */
775 e100_eeprom_read(nic, &addr_len, 0);
776 nic->eeprom_wc = 1 << addr_len;
777
778 if(start + count >= nic->eeprom_wc)
779 return -EINVAL;
780
781 for(addr = start; addr < start + count; addr++)
782 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
783
784 /* The checksum, stored in the last word, is calculated such that
785 * the sum of words should be 0xBABA */
786 for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
787 checksum += cpu_to_le16(nic->eeprom[addr]);
788 nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
789 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
790 nic->eeprom[nic->eeprom_wc - 1]);
791
792 return 0;
793}
794
962082b6 795#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
e6280f26 796#define E100_WAIT_SCB_FAST 20 /* delay like the old code */
858119e1 797static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
1da177e4
LT
798{
799 unsigned long flags;
800 unsigned int i;
801 int err = 0;
802
803 spin_lock_irqsave(&nic->cmd_lock, flags);
804
805 /* Previous command is accepted when SCB clears */
806 for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
807 if(likely(!readb(&nic->csr->scb.cmd_lo)))
808 break;
809 cpu_relax();
e6280f26 810 if(unlikely(i > E100_WAIT_SCB_FAST))
1da177e4
LT
811 udelay(5);
812 }
813 if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
814 err = -EAGAIN;
815 goto err_unlock;
816 }
817
818 if(unlikely(cmd != cuc_resume))
819 writel(dma_addr, &nic->csr->scb.gen_ptr);
820 writeb(cmd, &nic->csr->scb.cmd_lo);
821
822err_unlock:
823 spin_unlock_irqrestore(&nic->cmd_lock, flags);
824
825 return err;
826}
827
858119e1 828static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
1da177e4
LT
829 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
830{
831 struct cb *cb;
832 unsigned long flags;
833 int err = 0;
834
835 spin_lock_irqsave(&nic->cb_lock, flags);
836
837 if(unlikely(!nic->cbs_avail)) {
838 err = -ENOMEM;
839 goto err_unlock;
840 }
841
842 cb = nic->cb_to_use;
843 nic->cb_to_use = cb->next;
844 nic->cbs_avail--;
845 cb->skb = skb;
846
847 if(unlikely(!nic->cbs_avail))
848 err = -ENOSPC;
849
850 cb_prepare(nic, cb, skb);
851
852 /* Order is important otherwise we'll be in a race with h/w:
853 * set S-bit in current first, then clear S-bit in previous. */
854 cb->command |= cpu_to_le16(cb_s);
855 wmb();
856 cb->prev->command &= cpu_to_le16(~cb_s);
857
858 while(nic->cb_to_send != nic->cb_to_use) {
859 if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
860 nic->cb_to_send->dma_addr))) {
861 /* Ok, here's where things get sticky. It's
862 * possible that we can't schedule the command
863 * because the controller is too busy, so
864 * let's just queue the command and try again
865 * when another command is scheduled. */
962082b6
MC
866 if(err == -ENOSPC) {
867 //request a reset
868 schedule_work(&nic->tx_timeout_task);
869 }
1da177e4
LT
870 break;
871 } else {
872 nic->cuc_cmd = cuc_resume;
873 nic->cb_to_send = nic->cb_to_send->next;
874 }
875 }
876
877err_unlock:
878 spin_unlock_irqrestore(&nic->cb_lock, flags);
879
880 return err;
881}
882
883static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
884{
885 u32 data_out = 0;
886 unsigned int i;
ac7c6669 887 unsigned long flags;
1da177e4 888
ac7c6669
OM
889
890 /*
891 * Stratus87247: we shouldn't be writing the MDI control
892 * register until the Ready bit shows True. Also, since
893 * manipulation of the MDI control registers is a multi-step
894 * procedure it should be done under lock.
895 */
896 spin_lock_irqsave(&nic->mdio_lock, flags);
897 for (i = 100; i; --i) {
898 if (readl(&nic->csr->mdi_ctrl) & mdi_ready)
899 break;
900 udelay(20);
901 }
902 if (unlikely(!i)) {
903 printk("e100.mdio_ctrl(%s) won't go Ready\n",
904 nic->netdev->name );
905 spin_unlock_irqrestore(&nic->mdio_lock, flags);
906 return 0; /* No way to indicate timeout error */
907 }
1da177e4
LT
908 writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
909
ac7c6669 910 for (i = 0; i < 100; i++) {
1da177e4 911 udelay(20);
ac7c6669 912 if ((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
1da177e4
LT
913 break;
914 }
ac7c6669 915 spin_unlock_irqrestore(&nic->mdio_lock, flags);
1da177e4
LT
916 DPRINTK(HW, DEBUG,
917 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
918 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
919 return (u16)data_out;
920}
921
922static int mdio_read(struct net_device *netdev, int addr, int reg)
923{
924 return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
925}
926
927static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
928{
929 mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
930}
931
932static void e100_get_defaults(struct nic *nic)
933{
2afecc04
JB
934 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
935 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
1da177e4
LT
936
937 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
938 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
939 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id;
940 if(nic->mac == mac_unknown)
941 nic->mac = mac_82557_D100_A;
942
943 nic->params.rfds = rfds;
944 nic->params.cbs = cbs;
945
946 /* Quadwords to DMA into FIFO before starting frame transmit */
947 nic->tx_threshold = 0xE0;
948
962082b6
MC
949 /* no interrupt for every tx completion, delay = 256us if not 557*/
950 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
951 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
1da177e4
LT
952
953 /* Template for a freshly allocated RFD */
954 nic->blank_rfd.command = cpu_to_le16(cb_el);
955 nic->blank_rfd.rbd = 0xFFFFFFFF;
956 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
957
958 /* MII setup */
959 nic->mii.phy_id_mask = 0x1F;
960 nic->mii.reg_num_mask = 0x1F;
961 nic->mii.dev = nic->netdev;
962 nic->mii.mdio_read = mdio_read;
963 nic->mii.mdio_write = mdio_write;
964}
965
966static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
967{
968 struct config *config = &cb->u.config;
969 u8 *c = (u8 *)config;
970
971 cb->command = cpu_to_le16(cb_config);
972
973 memset(config, 0, sizeof(struct config));
974
975 config->byte_count = 0x16; /* bytes in this struct */
976 config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */
977 config->direct_rx_dma = 0x1; /* reserved */
978 config->standard_tcb = 0x1; /* 1=standard, 0=extended */
979 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
980 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
981 config->tx_underrun_retry = 0x3; /* # of underrun retries */
982 config->mii_mode = 0x1; /* 1=MII mode, 0=503 mode */
983 config->pad10 = 0x6;
984 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
985 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
986 config->ifs = 0x6; /* x16 = inter frame spacing */
987 config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */
988 config->pad15_1 = 0x1;
989 config->pad15_2 = 0x1;
990 config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */
991 config->fc_delay_hi = 0x40; /* time delay for fc frame */
992 config->tx_padding = 0x1; /* 1=pad short frames */
993 config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */
994 config->pad18 = 0x1;
995 config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */
996 config->pad20_1 = 0x1F;
997 config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */
998 config->pad21_1 = 0x5;
999
1000 config->adaptive_ifs = nic->adaptive_ifs;
1001 config->loopback = nic->loopback;
1002
1003 if(nic->mii.force_media && nic->mii.full_duplex)
1004 config->full_duplex_force = 0x1; /* 1=force, 0=auto */
1005
1006 if(nic->flags & promiscuous || nic->loopback) {
1007 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
1008 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
1009 config->promiscuous_mode = 0x1; /* 1=on, 0=off */
1010 }
1011
1012 if(nic->flags & multicast_all)
1013 config->multicast_all = 0x1; /* 1=accept, 0=no */
1014
6bdacb1a
MC
1015 /* disable WoL when up */
1016 if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
1da177e4
LT
1017 config->magic_packet_disable = 0x1; /* 1=off, 0=on */
1018
1019 if(nic->mac >= mac_82558_D101_A4) {
1020 config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */
1021 config->mwi_enable = 0x1; /* 1=enable, 0=disable */
1022 config->standard_tcb = 0x0; /* 1=standard, 0=extended */
1023 config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */
1024 if(nic->mac >= mac_82559_D101M)
1025 config->tno_intr = 0x1; /* TCO stats enable */
1026 else
1027 config->standard_stat_counter = 0x0;
1028 }
1029
1030 DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1031 c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
1032 DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1033 c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
1034 DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1035 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
1036}
1037
2afecc04
JB
1038/********************************************************/
1039/* Micro code for 8086:1229 Rev 8 */
1040/********************************************************/
1041
1042/* Parameter values for the D101M B-step */
1043#define D101M_CPUSAVER_TIMER_DWORD 78
1044#define D101M_CPUSAVER_BUNDLE_DWORD 65
1045#define D101M_CPUSAVER_MIN_SIZE_DWORD 126
1046
1047#define D101M_B_RCVBUNDLE_UCODE \
1048{\
10490x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
10500x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
10510x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
10520x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
10530x00380438, 0x00000000, 0x00140000, 0x00380555, \
10540x00308000, 0x00100662, 0x00100561, 0x000E0408, \
10550x00134861, 0x000C0002, 0x00103093, 0x00308000, \
10560x00100624, 0x00100561, 0x000E0408, 0x00100861, \
10570x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
10580x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
10590x00000000, 0x00000000, 0x00000000, 0x00000000, \
10600x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
10610x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
10620x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
10630x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
10640x00041000, 0x00010004, 0x00130826, 0x000C0006, \
10650x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
10660x00000000, 0x00000000, 0x00000000, 0x00000000, \
10670x00000000, 0x00000000, 0x00000000, 0x00000000, \
10680x00080600, 0x00101B10, 0x00050004, 0x00100826, \
10690x00101210, 0x00380C34, 0x00000000, 0x00000000, \
10700x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
10710x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
10720x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
10730x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
10740x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
10750x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
10760x00130826, 0x000C0001, 0x00220559, 0x00101313, \
10770x00380559, 0x00000000, 0x00000000, 0x00000000, \
10780x00000000, 0x00000000, 0x00000000, 0x00000000, \
10790x00000000, 0x00130831, 0x0010090B, 0x00124813, \
10800x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
10810x003806A8, 0x00000000, 0x00000000, 0x00000000, \
1082}
1083
1084/********************************************************/
1085/* Micro code for 8086:1229 Rev 9 */
1086/********************************************************/
1087
1088/* Parameter values for the D101S */
1089#define D101S_CPUSAVER_TIMER_DWORD 78
1090#define D101S_CPUSAVER_BUNDLE_DWORD 67
1091#define D101S_CPUSAVER_MIN_SIZE_DWORD 128
1092
1093#define D101S_RCVBUNDLE_UCODE \
1094{\
10950x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
10960x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
10970x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
10980x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
10990x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
11000x00308000, 0x00100610, 0x00100561, 0x000E0408, \
11010x00134861, 0x000C0002, 0x00103093, 0x00308000, \
11020x00100624, 0x00100561, 0x000E0408, 0x00100861, \
11030x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
11040x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
11050x00000000, 0x00000000, 0x00000000, 0x00000000, \
11060x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
11070x003A047E, 0x00044010, 0x00380819, 0x00000000, \
11080x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
11090x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
11100x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
11110x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
11120x00101313, 0x00380700, 0x00000000, 0x00000000, \
11130x00000000, 0x00000000, 0x00000000, 0x00000000, \
11140x00080600, 0x00101B10, 0x00050004, 0x00100826, \
11150x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
11160x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
11170x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
11180x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
11190x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
11200x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
11210x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
11220x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
11230x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
11240x00000000, 0x00000000, 0x00000000, 0x00000000, \
11250x00000000, 0x00000000, 0x00000000, 0x00130831, \
11260x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
11270x00041000, 0x00010004, 0x00380700 \
1128}
1129
1130/********************************************************/
1131/* Micro code for the 8086:1229 Rev F/10 */
1132/********************************************************/
1133
1134/* Parameter values for the D102 E-step */
1135#define D102_E_CPUSAVER_TIMER_DWORD 42
1136#define D102_E_CPUSAVER_BUNDLE_DWORD 54
1137#define D102_E_CPUSAVER_MIN_SIZE_DWORD 46
1138
1139#define D102_E_RCVBUNDLE_UCODE \
1140{\
11410x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
11420x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
11430x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
11440x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
11450x00000000, 0x00000000, 0x00000000, 0x00000000, \
11460x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
11470x00000000, 0x00000000, 0x00000000, 0x00000000, \
11480x00000000, 0x00000000, 0x00000000, 0x00000000, \
11490x00000000, 0x00000000, 0x00000000, 0x00000000, \
11500x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
11510x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
11520x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
11530x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
11540x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
11550x00000000, 0x00000000, 0x00000000, 0x00000000, \
11560x00000000, 0x00000000, 0x00000000, 0x00000000, \
11570x00000000, 0x00000000, 0x00000000, 0x00000000, \
11580x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
11590x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
11600x00000000, 0x00000000, 0x00000000, 0x00000000, \
11610x00000000, 0x00000000, 0x00000000, 0x00000000, \
11620x00000000, 0x00000000, 0x00000000, 0x00000000, \
11630x00000000, 0x00000000, 0x00000000, 0x00000000, \
11640x00000000, 0x00000000, 0x00000000, 0x00000000, \
11650x00000000, 0x00000000, 0x00000000, 0x00000000, \
11660x00000000, 0x00000000, 0x00000000, 0x00000000, \
11670x00000000, 0x00000000, 0x00000000, 0x00000000, \
11680x00000000, 0x00000000, 0x00000000, 0x00000000, \
11690x00000000, 0x00000000, 0x00000000, 0x00000000, \
11700x00000000, 0x00000000, 0x00000000, 0x00000000, \
11710x00000000, 0x00000000, 0x00000000, 0x00000000, \
11720x00000000, 0x00000000, 0x00000000, 0x00000000, \
11730x00000000, 0x00000000, 0x00000000, 0x00000000, \
1174}
1175
24180333 1176static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1da177e4 1177{
2afecc04
JB
1178/* *INDENT-OFF* */
1179 static struct {
1180 u32 ucode[UCODE_SIZE + 1];
1181 u8 mac;
1182 u8 timer_dword;
1183 u8 bundle_dword;
1184 u8 min_size_dword;
1185 } ucode_opts[] = {
1186 { D101M_B_RCVBUNDLE_UCODE,
1187 mac_82559_D101M,
1188 D101M_CPUSAVER_TIMER_DWORD,
1189 D101M_CPUSAVER_BUNDLE_DWORD,
1190 D101M_CPUSAVER_MIN_SIZE_DWORD },
1191 { D101S_RCVBUNDLE_UCODE,
1192 mac_82559_D101S,
1193 D101S_CPUSAVER_TIMER_DWORD,
1194 D101S_CPUSAVER_BUNDLE_DWORD,
1195 D101S_CPUSAVER_MIN_SIZE_DWORD },
1196 { D102_E_RCVBUNDLE_UCODE,
1197 mac_82551_F,
1198 D102_E_CPUSAVER_TIMER_DWORD,
1199 D102_E_CPUSAVER_BUNDLE_DWORD,
1200 D102_E_CPUSAVER_MIN_SIZE_DWORD },
1201 { D102_E_RCVBUNDLE_UCODE,
1202 mac_82551_10,
1203 D102_E_CPUSAVER_TIMER_DWORD,
1204 D102_E_CPUSAVER_BUNDLE_DWORD,
1205 D102_E_CPUSAVER_MIN_SIZE_DWORD },
1206 { {0}, 0, 0, 0, 0}
1207 }, *opts;
1208/* *INDENT-ON* */
1209
1210/*************************************************************************
1211* CPUSaver parameters
1212*
1213* All CPUSaver parameters are 16-bit literals that are part of a
1214* "move immediate value" instruction. By changing the value of
1215* the literal in the instruction before the code is loaded, the
1216* driver can change the algorithm.
1217*
0779bf2d 1218* INTDELAY - This loads the dead-man timer with its initial value.
05479938 1219* When this timer expires the interrupt is asserted, and the
2afecc04
JB
1220* timer is reset each time a new packet is received. (see
1221* BUNDLEMAX below to set the limit on number of chained packets)
1222* The current default is 0x600 or 1536. Experiments show that
1223* the value should probably stay within the 0x200 - 0x1000.
1224*
05479938 1225* BUNDLEMAX -
2afecc04
JB
1226* This sets the maximum number of frames that will be bundled. In
1227* some situations, such as the TCP windowing algorithm, it may be
1228* better to limit the growth of the bundle size than let it go as
1229* high as it can, because that could cause too much added latency.
1230* The default is six, because this is the number of packets in the
1231* default TCP window size. A value of 1 would make CPUSaver indicate
1232* an interrupt for every frame received. If you do not want to put
1233* a limit on the bundle size, set this value to xFFFF.
1234*
05479938 1235* BUNDLESMALL -
2afecc04
JB
1236* This contains a bit-mask describing the minimum size frame that
1237* will be bundled. The default masks the lower 7 bits, which means
1238* that any frame less than 128 bytes in length will not be bundled,
1239* but will instead immediately generate an interrupt. This does
1240* not affect the current bundle in any way. Any frame that is 128
1241* bytes or large will be bundled normally. This feature is meant
1242* to provide immediate indication of ACK frames in a TCP environment.
1243* Customers were seeing poor performance when a machine with CPUSaver
1244* enabled was sending but not receiving. The delay introduced when
1245* the ACKs were received was enough to reduce total throughput, because
1246* the sender would sit idle until the ACK was finally seen.
1247*
1248* The current default is 0xFF80, which masks out the lower 7 bits.
1249* This means that any frame which is x7F (127) bytes or smaller
05479938 1250* will cause an immediate interrupt. Because this value must be a
2afecc04
JB
1251* bit mask, there are only a few valid values that can be used. To
1252* turn this feature off, the driver can write the value xFFFF to the
1253* lower word of this instruction (in the same way that the other
1254* parameters are used). Likewise, a value of 0xF800 (2047) would
1255* cause an interrupt to be generated for every frame, because all
1256* standard Ethernet frames are <= 2047 bytes in length.
1257*************************************************************************/
1258
05479938 1259/* if you wish to disable the ucode functionality, while maintaining the
2afecc04
JB
1260 * workarounds it provides, set the following defines to:
1261 * BUNDLESMALL 0
1262 * BUNDLEMAX 1
1263 * INTDELAY 1
1264 */
1265#define BUNDLESMALL 1
1266#define BUNDLEMAX (u16)6
1267#define INTDELAY (u16)1536 /* 0x600 */
1268
1269 /* do not load u-code for ICH devices */
1270 if (nic->flags & ich)
1271 goto noloaducode;
1272
1273 /* Search for ucode match against h/w rev_id */
1274 for (opts = ucode_opts; opts->mac; opts++) {
1275 int i;
1276 u32 *ucode = opts->ucode;
1277 if (nic->mac != opts->mac)
1278 continue;
1279
1280 /* Insert user-tunable settings */
1281 ucode[opts->timer_dword] &= 0xFFFF0000;
1282 ucode[opts->timer_dword] |= INTDELAY;
1283 ucode[opts->bundle_dword] &= 0xFFFF0000;
1284 ucode[opts->bundle_dword] |= BUNDLEMAX;
1285 ucode[opts->min_size_dword] &= 0xFFFF0000;
1286 ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;
1287
1288 for (i = 0; i < UCODE_SIZE; i++)
875521dd 1289 cb->u.ucode[i] = cpu_to_le32(ucode[i]);
24180333 1290 cb->command = cpu_to_le16(cb_ucode | cb_el);
2afecc04
JB
1291 return;
1292 }
1293
1294noloaducode:
24180333
JB
1295 cb->command = cpu_to_le16(cb_nop | cb_el);
1296}
1297
1298static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
1299 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
1300{
1301 int err = 0, counter = 50;
1302 struct cb *cb = nic->cb_to_clean;
1303
1304 if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
1305 DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
05479938 1306
24180333
JB
1307 /* must restart cuc */
1308 nic->cuc_cmd = cuc_start;
1309
1310 /* wait for completion */
1311 e100_write_flush(nic);
1312 udelay(10);
1313
1314 /* wait for possibly (ouch) 500ms */
1315 while (!(cb->status & cpu_to_le16(cb_complete))) {
1316 msleep(10);
1317 if (!--counter) break;
1318 }
05479938 1319
24180333
JB
1320 /* ack any interupts, something could have been set */
1321 writeb(~0, &nic->csr->scb.stat_ack);
1322
1323 /* if the command failed, or is not OK, notify and return */
1324 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1325 DPRINTK(PROBE,ERR, "ucode load failed\n");
1326 err = -EPERM;
1327 }
05479938 1328
24180333 1329 return err;
1da177e4
LT
1330}
1331
1332static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1333 struct sk_buff *skb)
1334{
1335 cb->command = cpu_to_le16(cb_iaaddr);
1336 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1337}
1338
1339static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1340{
1341 cb->command = cpu_to_le16(cb_dump);
1342 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1343 offsetof(struct mem, dump_buf));
1344}
1345
1346#define NCONFIG_AUTO_SWITCH 0x0080
1347#define MII_NSC_CONG MII_RESV1
1348#define NSC_CONG_ENABLE 0x0100
1349#define NSC_CONG_TXREADY 0x0400
1350#define ADVERTISE_FC_SUPPORTED 0x0400
1351static int e100_phy_init(struct nic *nic)
1352{
1353 struct net_device *netdev = nic->netdev;
1354 u32 addr;
1355 u16 bmcr, stat, id_lo, id_hi, cong;
1356
1357 /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
1358 for(addr = 0; addr < 32; addr++) {
1359 nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
1360 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1361 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1362 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1363 if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
1364 break;
1365 }
1366 DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
1367 if(addr == 32)
1368 return -EAGAIN;
1369
1370 /* Selected the phy and isolate the rest */
1371 for(addr = 0; addr < 32; addr++) {
1372 if(addr != nic->mii.phy_id) {
1373 mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
1374 } else {
1375 bmcr = mdio_read(netdev, addr, MII_BMCR);
1376 mdio_write(netdev, addr, MII_BMCR,
1377 bmcr & ~BMCR_ISOLATE);
1378 }
1379 }
1380
1381 /* Get phy ID */
1382 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
1383 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
1384 nic->phy = (u32)id_hi << 16 | (u32)id_lo;
1385 DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
1386
1387 /* Handle National tx phys */
1388#define NCS_PHY_MODEL_MASK 0xFFF0FFFF
1389 if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
1390 /* Disable congestion control */
1391 cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
1392 cong |= NSC_CONG_TXREADY;
1393 cong &= ~NSC_CONG_ENABLE;
1394 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
1395 }
1396
05479938 1397 if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
60ffa478
JK
1398 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
1399 !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
1400 /* enable/disable MDI/MDI-X auto-switching. */
1401 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
1402 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
64895145 1403 }
1da177e4
LT
1404
1405 return 0;
1406}
1407
1408static int e100_hw_init(struct nic *nic)
1409{
1410 int err;
1411
1412 e100_hw_reset(nic);
1413
1414 DPRINTK(HW, ERR, "e100_hw_init\n");
1415 if(!in_interrupt() && (err = e100_self_test(nic)))
1416 return err;
1417
1418 if((err = e100_phy_init(nic)))
1419 return err;
1420 if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
1421 return err;
1422 if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
1423 return err;
24180333 1424 if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
1da177e4
LT
1425 return err;
1426 if((err = e100_exec_cb(nic, NULL, e100_configure)))
1427 return err;
1428 if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
1429 return err;
1430 if((err = e100_exec_cmd(nic, cuc_dump_addr,
1431 nic->dma_addr + offsetof(struct mem, stats))))
1432 return err;
1433 if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
1434 return err;
1435
1436 e100_disable_irq(nic);
1437
1438 return 0;
1439}
1440
1441static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1442{
1443 struct net_device *netdev = nic->netdev;
1444 struct dev_mc_list *list = netdev->mc_list;
1445 u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
1446
1447 cb->command = cpu_to_le16(cb_multi);
1448 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1449 for(i = 0; list && i < count; i++, list = list->next)
1450 memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
1451 ETH_ALEN);
1452}
1453
1454static void e100_set_multicast_list(struct net_device *netdev)
1455{
1456 struct nic *nic = netdev_priv(netdev);
1457
1458 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
1459 netdev->mc_count, netdev->flags);
1460
1461 if(netdev->flags & IFF_PROMISC)
1462 nic->flags |= promiscuous;
1463 else
1464 nic->flags &= ~promiscuous;
1465
1466 if(netdev->flags & IFF_ALLMULTI ||
1467 netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
1468 nic->flags |= multicast_all;
1469 else
1470 nic->flags &= ~multicast_all;
1471
1472 e100_exec_cb(nic, NULL, e100_configure);
1473 e100_exec_cb(nic, NULL, e100_multi);
1474}
1475
1476static void e100_update_stats(struct nic *nic)
1477{
1478 struct net_device_stats *ns = &nic->net_stats;
1479 struct stats *s = &nic->mem->stats;
1480 u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
1481 (nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
1482 &s->complete;
1483
1484 /* Device's stats reporting may take several microseconds to
1485 * complete, so where always waiting for results of the
1486 * previous command. */
1487
1488 if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
1489 *complete = 0;
1490 nic->tx_frames = le32_to_cpu(s->tx_good_frames);
1491 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
1492 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
1493 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
1494 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
1495 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
1496 ns->collisions += nic->tx_collisions;
1497 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1498 le32_to_cpu(s->tx_lost_crs);
1da177e4
LT
1499 ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
1500 nic->rx_over_length_errors;
1501 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1502 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1503 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1504 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
ecf7130b 1505 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
1da177e4
LT
1506 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1507 le32_to_cpu(s->rx_alignment_errors) +
1508 le32_to_cpu(s->rx_short_frame_errors) +
1509 le32_to_cpu(s->rx_cdt_errors);
1510 nic->tx_deferred += le32_to_cpu(s->tx_deferred);
1511 nic->tx_single_collisions +=
1512 le32_to_cpu(s->tx_single_collisions);
1513 nic->tx_multiple_collisions +=
1514 le32_to_cpu(s->tx_multiple_collisions);
1515 if(nic->mac >= mac_82558_D101_A4) {
1516 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
1517 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
1518 nic->rx_fc_unsupported +=
1519 le32_to_cpu(s->fc_rcv_unsupported);
1520 if(nic->mac >= mac_82559_D101M) {
1521 nic->tx_tco_frames +=
1522 le16_to_cpu(s->xmt_tco_frames);
1523 nic->rx_tco_frames +=
1524 le16_to_cpu(s->rcv_tco_frames);
1525 }
1526 }
1527 }
1528
05479938 1529
1f53367d
MC
1530 if(e100_exec_cmd(nic, cuc_dump_reset, 0))
1531 DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
1da177e4
LT
1532}
1533
1534static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1535{
1536 /* Adjust inter-frame-spacing (IFS) between two transmits if
1537 * we're getting collisions on a half-duplex connection. */
1538
1539 if(duplex == DUPLEX_HALF) {
1540 u32 prev = nic->adaptive_ifs;
1541 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1542
1543 if((nic->tx_frames / 32 < nic->tx_collisions) &&
1544 (nic->tx_frames > min_frames)) {
1545 if(nic->adaptive_ifs < 60)
1546 nic->adaptive_ifs += 5;
1547 } else if (nic->tx_frames < min_frames) {
1548 if(nic->adaptive_ifs >= 5)
1549 nic->adaptive_ifs -= 5;
1550 }
1551 if(nic->adaptive_ifs != prev)
1552 e100_exec_cb(nic, NULL, e100_configure);
1553 }
1554}
1555
1556static void e100_watchdog(unsigned long data)
1557{
1558 struct nic *nic = (struct nic *)data;
1559 struct ethtool_cmd cmd;
1560
1561 DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
1562
1563 /* mii library handles link maintenance tasks */
1564
1565 mii_ethtool_gset(&nic->mii, &cmd);
1566
1567 if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1568 DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
1569 cmd.speed == SPEED_100 ? "100" : "10",
1570 cmd.duplex == DUPLEX_FULL ? "full" : "half");
1571 } else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1572 DPRINTK(LINK, INFO, "link down\n");
1573 }
1574
1575 mii_check_link(&nic->mii);
1576
1577 /* Software generated interrupt to recover from (rare) Rx
05479938
JB
1578 * allocation failure.
1579 * Unfortunately have to use a spinlock to not re-enable interrupts
1580 * accidentally, due to hardware that shares a register between the
1581 * interrupt mask bit and the SW Interrupt generation bit */
1da177e4
LT
1582 spin_lock_irq(&nic->cmd_lock);
1583 writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
1da177e4 1584 e100_write_flush(nic);
ad8c48ad 1585 spin_unlock_irq(&nic->cmd_lock);
1da177e4
LT
1586
1587 e100_update_stats(nic);
1588 e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
1589
1590 if(nic->mac <= mac_82557_D100_C)
1591 /* Issue a multicast command to workaround a 557 lock up */
1592 e100_set_multicast_list(nic->netdev);
1593
1594 if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
1595 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1596 nic->flags |= ich_10h_workaround;
1597 else
1598 nic->flags &= ~ich_10h_workaround;
1599
1600 mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
1601}
1602
858119e1 1603static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1da177e4
LT
1604 struct sk_buff *skb)
1605{
1606 cb->command = nic->tx_command;
962082b6 1607 /* interrupt every 16 packets regardless of delay */
996ec353
MC
1608 if((nic->cbs_avail & ~15) == nic->cbs_avail)
1609 cb->command |= cpu_to_le16(cb_i);
1da177e4
LT
1610 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1611 cb->u.tcb.tcb_byte_count = 0;
1612 cb->u.tcb.threshold = nic->tx_threshold;
1613 cb->u.tcb.tbd_count = 1;
1614 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1615 skb->data, skb->len, PCI_DMA_TODEVICE));
611494dc 1616 /* check for mapping failure? */
1da177e4
LT
1617 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1618}
1619
1620static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1621{
1622 struct nic *nic = netdev_priv(netdev);
1623 int err;
1624
1625 if(nic->flags & ich_10h_workaround) {
1626 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1627 Issue a NOP command followed by a 1us delay before
1628 issuing the Tx command. */
1f53367d
MC
1629 if(e100_exec_cmd(nic, cuc_nop, 0))
1630 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
1da177e4
LT
1631 udelay(1);
1632 }
1633
1634 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1635
1636 switch(err) {
1637 case -ENOSPC:
1638 /* We queued the skb, but now we're out of space. */
1639 DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
1640 netif_stop_queue(netdev);
1641 break;
1642 case -ENOMEM:
1643 /* This is a hard error - log it. */
1644 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
1645 netif_stop_queue(netdev);
1646 return 1;
1647 }
1648
1649 netdev->trans_start = jiffies;
1650 return 0;
1651}
1652
858119e1 1653static int e100_tx_clean(struct nic *nic)
1da177e4
LT
1654{
1655 struct cb *cb;
1656 int tx_cleaned = 0;
1657
1658 spin_lock(&nic->cb_lock);
1659
1da177e4
LT
1660 /* Clean CBs marked complete */
1661 for(cb = nic->cb_to_clean;
1662 cb->status & cpu_to_le16(cb_complete);
1663 cb = nic->cb_to_clean = cb->next) {
dc45010e
JB
1664 DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
1665 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
1666 cb->status);
1667
1da177e4
LT
1668 if(likely(cb->skb != NULL)) {
1669 nic->net_stats.tx_packets++;
1670 nic->net_stats.tx_bytes += cb->skb->len;
1671
1672 pci_unmap_single(nic->pdev,
1673 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1674 le16_to_cpu(cb->u.tcb.tbd.size),
1675 PCI_DMA_TODEVICE);
1676 dev_kfree_skb_any(cb->skb);
1677 cb->skb = NULL;
1678 tx_cleaned = 1;
1679 }
1680 cb->status = 0;
1681 nic->cbs_avail++;
1682 }
1683
1684 spin_unlock(&nic->cb_lock);
1685
1686 /* Recover from running out of Tx resources in xmit_frame */
1687 if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1688 netif_wake_queue(nic->netdev);
1689
1690 return tx_cleaned;
1691}
1692
1693static void e100_clean_cbs(struct nic *nic)
1694{
1695 if(nic->cbs) {
1696 while(nic->cbs_avail != nic->params.cbs.count) {
1697 struct cb *cb = nic->cb_to_clean;
1698 if(cb->skb) {
1699 pci_unmap_single(nic->pdev,
1700 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1701 le16_to_cpu(cb->u.tcb.tbd.size),
1702 PCI_DMA_TODEVICE);
1703 dev_kfree_skb(cb->skb);
1704 }
1705 nic->cb_to_clean = nic->cb_to_clean->next;
1706 nic->cbs_avail++;
1707 }
1708 pci_free_consistent(nic->pdev,
1709 sizeof(struct cb) * nic->params.cbs.count,
1710 nic->cbs, nic->cbs_dma_addr);
1711 nic->cbs = NULL;
1712 nic->cbs_avail = 0;
1713 }
1714 nic->cuc_cmd = cuc_start;
1715 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1716 nic->cbs;
1717}
1718
1719static int e100_alloc_cbs(struct nic *nic)
1720{
1721 struct cb *cb;
1722 unsigned int i, count = nic->params.cbs.count;
1723
1724 nic->cuc_cmd = cuc_start;
1725 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1726 nic->cbs_avail = 0;
1727
1728 nic->cbs = pci_alloc_consistent(nic->pdev,
1729 sizeof(struct cb) * count, &nic->cbs_dma_addr);
1730 if(!nic->cbs)
1731 return -ENOMEM;
1732
1733 for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
1734 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1735 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1736
1737 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1738 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1739 ((i+1) % count) * sizeof(struct cb));
1740 cb->skb = NULL;
1741 }
1742
1743 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1744 nic->cbs_avail = count;
1745
1746 return 0;
1747}
1748
1f53367d 1749static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1da177e4 1750{
1f53367d
MC
1751 if(!nic->rxs) return;
1752 if(RU_SUSPENDED != nic->ru_running) return;
1753
1754 /* handle init time starts */
1755 if(!rx) rx = nic->rxs;
1756
1da177e4 1757 /* (Re)start RU if suspended or idle and RFA is non-NULL */
1f53367d
MC
1758 if(rx->skb) {
1759 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1760 nic->ru_running = RU_RUNNING;
1da177e4
LT
1761 }
1762}
1763
1764#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
858119e1 1765static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1da177e4 1766{
4187592b 1767 if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
1da177e4
LT
1768 return -ENOMEM;
1769
1770 /* Align, init, and map the RFD. */
1da177e4
LT
1771 skb_reserve(rx->skb, NET_IP_ALIGN);
1772 memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
1773 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1774 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1775
1f53367d
MC
1776 if(pci_dma_mapping_error(rx->dma_addr)) {
1777 dev_kfree_skb_any(rx->skb);
097688ef 1778 rx->skb = NULL;
1f53367d
MC
1779 rx->dma_addr = 0;
1780 return -ENOMEM;
1781 }
1782
1da177e4
LT
1783 /* Link the RFD to end of RFA by linking previous RFD to
1784 * this one, and clearing EL bit of previous. */
1785 if(rx->prev->skb) {
1786 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1787 put_unaligned(cpu_to_le32(rx->dma_addr),
1788 (u32 *)&prev_rfd->link);
1789 wmb();
1790 prev_rfd->command &= ~cpu_to_le16(cb_el);
1791 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1792 sizeof(struct rfd), PCI_DMA_TODEVICE);
1793 }
1794
1795 return 0;
1796}
1797
/* Indicate one completed receive frame to the network stack.
 *
 * Returns -EAGAIN when the NAPI quota is exhausted, -ENODATA when the
 * RFD at @rx has not yet been completed by hardware, 0 otherwise.
 * On 0, rx->skb has been consumed (handed up or freed) and is NULL. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct sk_buff *skb = rx->skb;
	/* the RFD lives at the head of the receive buffer */
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if(unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_FROMDEVICE);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if(unlikely(!(rfd_status & cb_complete)))
		return -ENODATA;

	/* Get actual data size (low 14 bits of the count field) */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* this allows for a fast restart without re-enabling interrupts */
	if(le16_to_cpu(rfd->command) & cb_el)
		nic->ru_running = RU_SUSPENDED;

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if(unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		nic->net_stats.rx_packets++;
		nic->net_stats.rx_bytes += actual_size;
		nic->netdev->last_rx = jiffies;
		netif_receive_skb(skb);
		if(work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}
1857
/* NAPI receive cleanup: indicate completed frames, refill the RFD
 * ring, and restart the receive unit if it suspended on an RNR
 * (Receive No Resource) condition. */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0;
	struct rx *rx_to_start = NULL;

	/* are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if(RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Indicate newly arrived packets */
	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		if(-EAGAIN == err) {
			/* hit quota so have more work to do, restart once
			 * cleanup is complete */
			restart_required = 0;
			break;
		} else if(-ENODATA == err)
			break; /* No more to clean */
	}

	/* save our starting point as the place we'll restart the receiver */
	if(restart_required)
		rx_to_start = nic->rx_to_clean;

	/* Alloc new skbs to refill list */
	for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if(unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	if(restart_required) {
		// ack the rnr?
		writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, rx_to_start);
		if(work_done)
			(*work_done)++;
	}
}
1902
1903static void e100_rx_clean_list(struct nic *nic)
1904{
1905 struct rx *rx;
1906 unsigned int i, count = nic->params.rfds.count;
1907
1f53367d
MC
1908 nic->ru_running = RU_UNINITIALIZED;
1909
1da177e4
LT
1910 if(nic->rxs) {
1911 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1912 if(rx->skb) {
1913 pci_unmap_single(nic->pdev, rx->dma_addr,
1914 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
1915 dev_kfree_skb(rx->skb);
1916 }
1917 }
1918 kfree(nic->rxs);
1919 nic->rxs = NULL;
1920 }
1921
1922 nic->rx_to_use = nic->rx_to_clean = NULL;
1da177e4
LT
1923}
1924
1925static int e100_rx_alloc_list(struct nic *nic)
1926{
1927 struct rx *rx;
1928 unsigned int i, count = nic->params.rfds.count;
1929
1930 nic->rx_to_use = nic->rx_to_clean = NULL;
1f53367d 1931 nic->ru_running = RU_UNINITIALIZED;
1da177e4 1932
c48e3fca 1933 if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
1da177e4 1934 return -ENOMEM;
1da177e4
LT
1935
1936 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1937 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
1938 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
1939 if(e100_rx_alloc_skb(nic, rx)) {
1940 e100_rx_clean_list(nic);
1941 return -ENOMEM;
1942 }
1943 }
1944
1945 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
1f53367d 1946 nic->ru_running = RU_SUSPENDED;
1da177e4
LT
1947
1948 return 0;
1949}
1950
/* Interrupt handler.  Acks all pending causes and defers the real
 * work to NAPI polling.  Returns IRQ_NONE when the (shared) interrupt
 * is not ours or the hardware has been ejected. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = readb(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if(stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	writeb(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if(stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* ints stay disabled until e100_poll() finishes its work */
	if(likely(netif_rx_schedule_prep(netdev))) {
		e100_disable_irq(nic);
		__netif_rx_schedule(netdev);
	}

	return IRQ_HANDLED;
}
1977
/* NAPI poll routine: clean received frames (up to the budget) and
 * completed transmits.  Returns 1 while more work remains, 0 once
 * polling is complete and interrupts have been re-enabled. */
static int e100_poll(struct net_device *netdev, int *budget)
{
	struct nic *nic = netdev_priv(netdev);
	unsigned int work_to_do = min(netdev->quota, *budget);
	unsigned int work_done = 0;
	int tx_cleaned;

	e100_rx_clean(nic, &work_done, work_to_do);
	tx_cleaned = e100_tx_clean(nic);

	/* If no Rx and Tx cleanup work was done, exit polling mode. */
	if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
		netif_rx_complete(netdev);
		e100_enable_irq(nic);
		return 0;
	}

	*budget -= work_done;
	netdev->quota -= work_done;

	return 1;
}
2000
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polled "interrupt" entry for netconsole/netpoll: run the interrupt
 * handler and transmit cleanup with the hardware interrupt masked. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *adapter = netdev_priv(netdev);

	e100_disable_irq(adapter);
	e100_intr(adapter->pdev->irq, netdev);
	e100_tx_clean(adapter);
	e100_enable_irq(adapter);
}
#endif
2012
2013static struct net_device_stats *e100_get_stats(struct net_device *netdev)
2014{
2015 struct nic *nic = netdev_priv(netdev);
2016 return &nic->net_stats;
2017}
2018
2019static int e100_set_mac_address(struct net_device *netdev, void *p)
2020{
2021 struct nic *nic = netdev_priv(netdev);
2022 struct sockaddr *addr = p;
2023
2024 if (!is_valid_ether_addr(addr->sa_data))
2025 return -EADDRNOTAVAIL;
2026
2027 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2028 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2029
2030 return 0;
2031}
2032
2033static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2034{
2035 if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2036 return -EINVAL;
2037 netdev->mtu = new_mtu;
2038 return 0;
2039}
2040
2041static int e100_asf(struct nic *nic)
2042{
2043 /* ASF can be enabled from eeprom */
2044 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2045 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2046 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2047 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
2048}
2049
/* Bring the interface up: allocate rings, init hardware, start the
 * receiver, arm the watchdog, hook the IRQ, and enable NAPI.
 * On failure, unwinds in reverse order and returns a -errno. */
static int e100_up(struct nic *nic)
{
	int err;

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	netif_poll_enable(nic->netdev);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
2081
/* Take the interface down: quiesce NAPI, stop the queue, reset the
 * hardware, release the IRQ and timers, and free the rings.
 * Mirrors e100_up() in reverse. */
static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	netif_poll_disable(nic->netdev);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
2094
2095static void e100_tx_timeout(struct net_device *netdev)
2096{
2097 struct nic *nic = netdev_priv(netdev);
2098
05479938 2099 /* Reset outside of interrupt context, to avoid request_irq
2acdb1e0
MC
2100 * in interrupt context */
2101 schedule_work(&nic->tx_timeout_task);
2102}
2103
c4028958 2104static void e100_tx_timeout_task(struct work_struct *work)
2acdb1e0 2105{
c4028958
DH
2106 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2107 struct net_device *netdev = nic->netdev;
2acdb1e0 2108
1da177e4
LT
2109 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
2110 readb(&nic->csr->scb.status));
2111 e100_down(netdev_priv(netdev));
2112 e100_up(netdev_priv(netdev));
2113}
2114
/* Run a MAC- or PHY-level loopback self test.
 *
 * Use driver resources to perform internal MAC or PHY
 * loopback test. A single packet is prepared and transmitted
 * in loopback mode, and the test passes if the received
 * packet compares byte-for-byte to the transmitted packet.
 * Returns 0 on success, -EAGAIN on miscompare, other -errno on
 * setup failure. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if(nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if(loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* give the frame time to loop back into the first RFD */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* compare received payload (past the RFD header) with what we sent */
	if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
2170
2171#define MII_LED_CONTROL 0x1B
2172static void e100_blink_led(unsigned long data)
2173{
2174 struct nic *nic = (struct nic *)data;
2175 enum led_state {
2176 led_on = 0x01,
2177 led_off = 0x04,
2178 led_on_559 = 0x05,
2179 led_on_557 = 0x07,
2180 };
2181
2182 nic->leds = (nic->leds & led_on) ? led_off :
2183 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2184 mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
2185 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
2186}
2187
2188static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2189{
2190 struct nic *nic = netdev_priv(netdev);
2191 return mii_ethtool_gset(&nic->mii, cmd);
2192}
2193
2194static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2195{
2196 struct nic *nic = netdev_priv(netdev);
2197 int err;
2198
2199 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2200 err = mii_ethtool_sset(&nic->mii, cmd);
2201 e100_exec_cb(nic, NULL, e100_configure);
2202
2203 return err;
2204}
2205
2206static void e100_get_drvinfo(struct net_device *netdev,
2207 struct ethtool_drvinfo *info)
2208{
2209 struct nic *nic = netdev_priv(netdev);
2210 strcpy(info->driver, DRV_NAME);
2211 strcpy(info->version, DRV_VERSION);
2212 strcpy(info->fw_version, "N/A");
2213 strcpy(info->bus_info, pci_name(nic->pdev));
2214}
2215
2216static int e100_get_regs_len(struct net_device *netdev)
2217{
2218 struct nic *nic = netdev_priv(netdev);
2219#define E100_PHY_REGS 0x1C
2220#define E100_REGS_LEN 1 + E100_PHY_REGS + \
2221 sizeof(nic->mem->dump_buf) / sizeof(u32)
2222 return E100_REGS_LEN * sizeof(u32);
2223}
2224
/* ethtool: produce a register dump.  Layout: word 0 is the SCB
 * command/status, words 1..1+E100_PHY_REGS are the PHY MDIO registers
 * (highest register number first), followed by the device dump buffer. */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->rev_id;
	buff[0] = readb(&nic->csr->scb.cmd_hi) << 24 |
		readb(&nic->csr->scb.cmd_lo) << 16 |
		readw(&nic->csr->scb.status);
	for(i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	/* the dump command completes asynchronously; give the device
	 * time to fill dump_buf before copying it out */
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
2245
2246static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2247{
2248 struct nic *nic = netdev_priv(netdev);
2249 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2250 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2251}
2252
2253static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2254{
2255 struct nic *nic = netdev_priv(netdev);
2256
2257 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2258 return -EOPNOTSUPP;
2259
2260 if(wol->wolopts)
2261 nic->flags |= wol_magic;
2262 else
2263 nic->flags &= ~wol_magic;
2264
1da177e4
LT
2265 e100_exec_cb(nic, NULL, e100_configure);
2266
2267 return 0;
2268}
2269
2270static u32 e100_get_msglevel(struct net_device *netdev)
2271{
2272 struct nic *nic = netdev_priv(netdev);
2273 return nic->msg_enable;
2274}
2275
2276static void e100_set_msglevel(struct net_device *netdev, u32 value)
2277{
2278 struct nic *nic = netdev_priv(netdev);
2279 nic->msg_enable = value;
2280}
2281
2282static int e100_nway_reset(struct net_device *netdev)
2283{
2284 struct nic *nic = netdev_priv(netdev);
2285 return mii_nway_restart(&nic->mii);
2286}
2287
2288static u32 e100_get_link(struct net_device *netdev)
2289{
2290 struct nic *nic = netdev_priv(netdev);
2291 return mii_link_ok(&nic->mii);
2292}
2293
2294static int e100_get_eeprom_len(struct net_device *netdev)
2295{
2296 struct nic *nic = netdev_priv(netdev);
2297 return nic->eeprom_wc << 1;
2298}
2299
2300#define E100_EEPROM_MAGIC 0x1234
2301static int e100_get_eeprom(struct net_device *netdev,
2302 struct ethtool_eeprom *eeprom, u8 *bytes)
2303{
2304 struct nic *nic = netdev_priv(netdev);
2305
2306 eeprom->magic = E100_EEPROM_MAGIC;
2307 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2308
2309 return 0;
2310}
2311
2312static int e100_set_eeprom(struct net_device *netdev,
2313 struct ethtool_eeprom *eeprom, u8 *bytes)
2314{
2315 struct nic *nic = netdev_priv(netdev);
2316
2317 if(eeprom->magic != E100_EEPROM_MAGIC)
2318 return -EINVAL;
2319
2320 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2321
2322 return e100_eeprom_save(nic, eeprom->offset >> 1,
2323 (eeprom->len >> 1) + 1);
2324}
2325
2326static void e100_get_ringparam(struct net_device *netdev,
2327 struct ethtool_ringparam *ring)
2328{
2329 struct nic *nic = netdev_priv(netdev);
2330 struct param_range *rfds = &nic->params.rfds;
2331 struct param_range *cbs = &nic->params.cbs;
2332
2333 ring->rx_max_pending = rfds->max;
2334 ring->tx_max_pending = cbs->max;
2335 ring->rx_mini_max_pending = 0;
2336 ring->rx_jumbo_max_pending = 0;
2337 ring->rx_pending = rfds->count;
2338 ring->tx_pending = cbs->count;
2339 ring->rx_mini_pending = 0;
2340 ring->rx_jumbo_pending = 0;
2341}
2342
/* ethtool: resize the rx/tx rings.  Requested sizes are clamped to
 * [min, max]; the interface is bounced if it is running so the new
 * ring sizes take effect. */
static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	/* mini/jumbo rings are not supported */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if(netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
		rfds->count, cbs->count);
	if(netif_running(netdev))
		e100_up(nic);

	return 0;
}
2366
2367static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2368 "Link test (on/offline)",
2369 "Eeprom test (on/offline)",
2370 "Self test (offline)",
2371 "Mac loopback (offline)",
2372 "Phy loopback (offline)",
2373};
2374#define E100_TEST_LEN sizeof(e100_gstrings_test) / ETH_GSTRING_LEN
2375
2376static int e100_diag_test_count(struct net_device *netdev)
2377{
2378 return E100_TEST_LEN;
2379}
2380
/* ethtool self-test.  Link and EEPROM checks always run; with
 * ETH_TEST_FL_OFFLINE the interface is taken down for the hardware
 * self test and MAC/PHY loopback tests, then restored.  A non-zero
 * data[] entry marks that test failed. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if(test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_up(nic);
	}
	for(i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* allow time for the link to settle after the PHY was reset */
	msleep_interruptible(4 * 1000);
}
2413
2414static int e100_phys_id(struct net_device *netdev, u32 data)
2415{
2416 struct nic *nic = netdev_priv(netdev);
2417
2418 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2419 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
2420 mod_timer(&nic->blink_timer, jiffies);
2421 msleep_interruptible(data * 1000);
2422 del_timer_sync(&nic->blink_timer);
2423 mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
2424
2425 return 0;
2426}
2427
2428static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2429 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2430 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2431 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2432 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2433 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2434 "tx_heartbeat_errors", "tx_window_errors",
2435 /* device-specific stats */
2436 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2437 "tx_flow_control_pause", "rx_flow_control_pause",
2438 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2439};
2440#define E100_NET_STATS_LEN 21
2441#define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
2442
2443static int e100_get_stats_count(struct net_device *netdev)
2444{
2445 return E100_STATS_LEN;
2446}
2447
/* ethtool: fill the statistics array in e100_gstrings_stats order.
 * NOTE(review): the first E100_NET_STATS_LEN values are read by
 * casting net_stats to an unsigned long array -- this assumes
 * struct net_device_stats is a flat array of unsigned long fields;
 * verify against the current struct definition. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for(i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&nic->net_stats)[i];

	/* device-specific counters, in e100_gstrings_stats order */
	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}
2466
2467static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2468{
2469 switch(stringset) {
2470 case ETH_SS_TEST:
2471 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2472 break;
2473 case ETH_SS_STATS:
2474 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2475 break;
2476 }
2477}
2478
/* ethtool entry points for the e100 driver */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test_count	= e100_diag_test_count,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_stats_count	= e100_get_stats_count,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
2504
2505static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2506{
2507 struct nic *nic = netdev_priv(netdev);
2508
2509 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2510}
2511
2512static int e100_alloc(struct nic *nic)
2513{
2514 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2515 &nic->dma_addr);
2516 return nic->mem ? 0 : -ENOMEM;
2517}
2518
2519static void e100_free(struct nic *nic)
2520{
2521 if(nic->mem) {
2522 pci_free_consistent(nic->pdev, sizeof(struct mem),
2523 nic->mem, nic->dma_addr);
2524 nic->mem = NULL;
2525 }
2526}
2527
2528static int e100_open(struct net_device *netdev)
2529{
2530 struct nic *nic = netdev_priv(netdev);
2531 int err = 0;
2532
2533 netif_carrier_off(netdev);
2534 if((err = e100_up(nic)))
2535 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2536 return err;
2537}
2538
/* net_device stop hook. */
static int e100_close(struct net_device *netdev)
{
	struct nic *adapter = netdev_priv(netdev);

	e100_down(adapter);
	return 0;
}
2544
/* PCI probe: allocate the net_device, map the CSR BAR, reset the
 * hardware, load the EEPROM/MAC address, and register the interface.
 * Error paths unwind in strict reverse order of acquisition. */
static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
		if(((1 << debug) - 1) & NETIF_MSG_PROBE)
			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
		return -ENOMEM;
	}

	/* hook up net_device operations */
	netdev->open = e100_open;
	netdev->stop = e100_close;
	netdev->hard_start_xmit = e100_xmit_frame;
	netdev->get_stats = e100_get_stats;
	netdev->set_multicast_list = e100_set_multicast_list;
	netdev->set_mac_address = e100_set_mac_address;
	netdev->change_mtu = e100_change_mtu;
	netdev->do_ioctl = e100_do_ioctl;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->tx_timeout = e100_tx_timeout;
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	netdev->poll = e100_poll;
	netdev->weight = E100_NAPI_WEIGHT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e100_netpoll;
#endif
	/* temporary name for early DPRINTK output; replaced before
	 * register_netdev() below */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	pci_set_drvdata(pdev, netdev);

	if((err = pci_enable_device(pdev))) {
		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
		goto err_out_free_dev;
	}

	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
			"base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if((err = pci_request_regions(pdev, DRV_NAME))) {
		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable_pdev;
	}

	if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic->csr = ioremap(pci_resource_start(pdev, 0), sizeof(struct csr));
	if(!nic->csr) {
		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	/* driver_data flags the ICH variants (PHY loopback quirk etc.) */
	if(ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;
	init_timer(&nic->blink_timer);
	nic->blink_timer.function = e100_blink_led;
	nic->blink_timer.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if((err = e100_alloc(nic))) {
		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
		goto err_out_iounmap;
	}

	if((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	/* station address comes from the first words of the EEPROM */
	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if(!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "Invalid MAC address from "
			"EEPROM, aborting.\n");
		err = -EAGAIN;
		goto err_out_free;
	}

	/* Wol magic packet can be enabled from eeprom */
	if((nic->mac >= mac_82558_D101_A4) &&
	   (nic->eeprom[eeprom_id] & eeprom_id_wol))
		nic->flags |= wol_magic;

	/* ack any pending wake events, disable PME */
	err = pci_enable_wake(pdev, 0, 0);
	if (err)
		DPRINTK(PROBE, ERR, "Error clearing wake event\n");

	strcpy(netdev->name, "eth%d");
	if((err = register_netdev(netdev))) {
		DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
		goto err_out_free;
	}

	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, "
		"MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
		(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	iounmap(nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
2698
2699static void __devexit e100_remove(struct pci_dev *pdev)
2700{
2701 struct net_device *netdev = pci_get_drvdata(pdev);
2702
2703 if(netdev) {
2704 struct nic *nic = netdev_priv(netdev);
2705 unregister_netdev(netdev);
2706 e100_free(nic);
2707 iounmap(nic->csr);
2708 free_netdev(netdev);
2709 pci_release_regions(pdev);
2710 pci_disable_device(pdev);
2711 pci_set_drvdata(pdev, NULL);
2712 }
2713}
2714
#ifdef CONFIG_PM
/* PM suspend: quiesce the interface, save PCI state, arm PME wakeup
 * when WoL or ASF is enabled, and drop the device into D3hot. */
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

#ifdef CONFIG_E100_NAPI
	if (netif_running(netdev))
		netif_poll_disable(nic->netdev);
#endif
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);

	pci_save_state(pdev);

	/* bitwise OR of two 0/1 ints -- equivalent to || here */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
2743
/* PM resume: restore power state and PCI config, clear pending wake
 * events, and bring the interface back up if it was running. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */
1da177e4 2761
6bdacb1a 2762
/* Shutdown/reboot hook: same quiesce-and-arm-wakeup sequence as
 * e100_suspend(), minus the PCI state save. */
static void e100_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

#ifdef CONFIG_E100_NAPI
	if (netif_running(netdev))
		netif_poll_disable(nic->netdev);
#endif
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);

	/* bitwise OR of two 0/1 ints -- equivalent to || here */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}
2786
/* ------------------ PCI Error Recovery infrastructure -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	/* Similar to calling e100_down(), but avoids adapter I/O. */
	netdev->stop(netdev);

	/* Detach; put netif into state similar to hotplug unplug. */
	/* NOTE(review): netif_poll_enable() in a detach path looks
	 * suspicious (stop() already disabled polling); confirm this
	 * is not meant to be netif_poll_disable(). */
	netif_poll_enable(netdev);
	netif_device_detach(netdev);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
2808
/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
2834
/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}
2856
/* PCI error recovery callbacks (AER). */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
6bdacb1a 2862
1da177e4
LT
2863static struct pci_driver e100_driver = {
2864 .name = DRV_NAME,
2865 .id_table = e100_id_table,
2866 .probe = e100_probe,
2867 .remove = __devexit_p(e100_remove),
e8e82b76 2868#ifdef CONFIG_PM
975b366a 2869 /* Power Management hooks */
1da177e4
LT
2870 .suspend = e100_suspend,
2871 .resume = e100_resume,
2872#endif
05479938 2873 .shutdown = e100_shutdown,
2cc30492 2874 .err_handler = &e100_err_handler,
1da177e4
LT
2875};
2876
2877static int __init e100_init_module(void)
2878{
2879 if(((1 << debug) - 1) & NETIF_MSG_DRV) {
2880 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2881 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
2882 }
29917620 2883 return pci_register_driver(&e100_driver);
1da177e4
LT
2884}
2885
/* Module exit point: unregister from the PCI core. */
static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);
This page took 0.321781 seconds and 5 git commands to generate.