[PATCH] e100: Increased delay loop for command blocks
drivers/net/e100.c
/*******************************************************************************


  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free
  Software Foundation; either version 2 of the License, or (at your option)
  any later version.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/*
 *	e100.c: Intel(R) PRO/100 ethernet driver
 *
 *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
 *	original e100 driver, but better described as a munging of
 *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 *	References:
 *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
 *		Open Source Software Developers Manual,
 *		http://sourceforge.net/projects/e1000
 *
 *
 *	                     Theory of Operation
 *
 *	I.    General
 *
 *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 *	controller family, which includes the 82557, 82558, 82559, 82550,
 *	82551, and 82562 devices.  82558 and greater controllers
 *	integrate the Intel 82555 PHY.  The controllers are used in
 *	server and client network interface cards, as well as in
 *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 *	configurations.  8255x supports a 32-bit linear addressing
 *	mode and operates at a 33 MHz PCI clock rate.
 *
 *	II.   Driver Operation
 *
 *	Memory-mapped mode is used exclusively to access the device's
 *	shared-memory structure, the Control/Status Registers (CSR).  All
 *	setup, configuration, and control of the device, including queuing
 *	of Tx, Rx, and configuration commands, is through the CSR.
 *	cmd_lock serializes accesses to the CSR command register.  cb_lock
 *	protects the shared Command Block List (CBL).
 *
 *	8255x is highly MII-compliant and all accesses to the PHY go
 *	through the Management Data Interface (MDI).  Consequently, the
 *	driver leverages the mii.c library shared with other MII-compliant
 *	devices.
 *
 *	Big- and Little-Endian byte order as well as 32- and 64-bit
 *	archs are supported.  Weak-ordered memory and non-cache-coherent
 *	archs are supported.
 *
 *	III.  Transmit
 *
 *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 *	together in a fixed-size ring (CBL) thus forming the flexible mode
 *	memory structure.  A TCB marked with the suspend-bit indicates
 *	the end of the ring.  The last TCB processed suspends the
 *	controller, and the controller can be restarted by issuing a CU
 *	resume command to continue from the suspend point, or a CU start
 *	command to start at a given position in the ring.
 *
 *	Non-Tx commands (config, multicast setup, etc) are linked
 *	into the CBL ring along with Tx commands.  The common structure
 *	used for both Tx and non-Tx commands is the Command Block (CB).
 *
 *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
 *	is the next CB to check for completion; cb_to_send is the first
 *	CB to start on in case of a previous failure to resume.  CB clean-
 *	up happens in interrupt context in response to a CU interrupt.
 *	cbs_avail keeps track of the number of free CB resources available.
 *
 *	Hardware padding of short packets to the minimum packet size is
 *	enabled.  82557 pads with 7Eh, while the later controllers pad
 *	with 00h.
 *
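 *	As a rough sketch of how the three pointers chase each other
 *	around the CBL (an illustration added here, not a figure from
 *	the 8255x manual):
 *
 *	  ... -> [done] -> [done] -> [queued] -> [queued] -> [free] -> ...
 *	          ^cb_to_clean       ^cb_to_send             ^cb_to_use
 *
 *	Completed CBs at cb_to_clean are recycled in interrupt context,
 *	CBs from cb_to_send up to cb_to_use are owned by the CU, and new
 *	commands are queued at cb_to_use.
 *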
 *	IV.   Receive
 *
 *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
 *	Descriptors (RFD) + data buffer, thus forming the simplified mode
 *	memory structure.  Rx skbs are allocated to contain both the RFD
 *	and the data buffer, but the RFD is pulled off before the skb is
 *	indicated.  The data buffer is aligned such that encapsulated
 *	protocol headers are u32-aligned.  Since the RFD is part of the
 *	mapped shared memory, and completion status is contained within
 *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
 *	view from software and hardware.
 *
 *	Under typical operation, the receive unit (RU) is started once,
 *	and the controller happily fills RFDs as frames arrive.  If
 *	replacement RFDs cannot be allocated, or the RU goes non-active,
 *	the RU must be restarted.  Frame arrival generates an interrupt,
 *	and Rx indication and re-allocation happen in the same context,
 *	therefore no locking is required.  A software-generated interrupt
 *	is generated from the watchdog to recover from a failed allocation
 *	scenario where all Rx resources have been indicated and none
 *	replaced.
 *
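 *	An illustrative sketch (drawn from the code below, not from the
 *	hardware manual) of an Rx skb while the device owns it:
 *
 *	  skb->data
 *	  +------------------+------------------------------------+
 *	  | struct rfd       | data buffer (VLAN_ETH_FRAME_LEN)   |
 *	  +------------------+------------------------------------+
 *
 *	On indication, skb_reserve() pulls the RFD off the front so the
 *	stack sees only the frame data.
 *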
 *	V.    Miscellaneous
 *
 *	VLAN offloading of tagging, stripping and filtering is not
 *	supported, but the driver will accommodate the extra 4-byte VLAN
 *	tag for processing by upper layers.  Tx/Rx Checksum offloading is
 *	not supported.  Tx Scatter/Gather is not supported.  Jumbo Frames
 *	are not supported (hardware limitation).
 *
 *	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 *	Thanks to JC (jchapman@katalix.com) for helping with
 *	testing/troubleshooting the development driver.
 *
 *	TODO:
 *	o several entry points race with dev->close
 *	o check for tx-no-resources/stop Q races with tx clean/wake Q
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <asm/unaligned.h>


#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.4.8-k2"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2005 Intel Corporation"
#define PFX			DRV_NAME ": "

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = 3;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__FUNCTION__ , ## args))

#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_unknown  = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_ready = 0x10,
	rus_mask  = 0x3C,
};

enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING = 1,
	RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};

struct rfd {
	u16 status;
	u16 command;
	u32 link;
	u32 rbd;
	u16 actual_size;
	u16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
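
/* A worked expansion of the X() macro above (illustrative, not generated
 * code): with little-endian bitfields,
 *
 *	u8 X(byte_count:6, pad0:2);
 *
 * expands to "u8 byte_count:6, pad0:2;", while with big-endian bitfields
 * the declarators are swapped to "u8 pad0:2, byte_count:6;", so each
 * field lands in the same physical bits either way. */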
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	u16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	u16 status;
	u16 command;
	u32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		u32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				u32 buf_addr;
				u16 size;
				u16 eol;
			} tbd;
		} tcb;
		u32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	u16 xmt_tco_frames, rcv_tco_frames;
	u32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	u16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct net_device_stats net_stats;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u8 rev_id;
	u16 leds;
	u16 eeprom_wc;
	u16 eeprom[256];
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)readb(&nic->csr->scb.status);
}

static inline void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	writeb(irq_mask_none, &nic->csr->scb.cmd_hi);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
	e100_write_flush(nic);
}

static inline void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	writeb(irq_mask_all, &nic->csr->scb.cmd_hi);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
	e100_write_flush(nic);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	writel(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	writel(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	writel(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if(nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if(nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		cpu_to_le16(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for(j = 0; j < 3; j++) {

		/* Chip select */
		writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for(i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		writeb(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
}

/* General technique stolen from the eepro100 driver - very clever */
static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for(i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = readb(&nic->csr->eeprom_ctrl_lo);
		if(!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	writeb(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return le16_to_cpu(data);
}
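
/* An illustrative note on the addr_len discovery above (our reading of
 * the loop, not from the spec): probing always starts with an 8-bit
 * address.  If the part actually decodes only 6 address bits, its dummy
 * zero appears on EEDO two clocks early, the `i > 16` test catches it,
 * and *addr_len is trimmed from 8 to 6 for all subsequent reads. */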

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for(addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if(addr < nic->eeprom_wc - 1)
			checksum += cpu_to_le16(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	checksum = le16_to_cpu(0xBABA - checksum);
	if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		return -EAGAIN;
	}

	return 0;
}
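
/* Worked example of the checksum rule above (illustrative numbers): if
 * the first eeprom_wc - 1 words sum to 0x1234, the last word must hold
 * 0xBABA - 0x1234 = 0xA886 so that the sum over all words is 0xBABA. */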

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if(start + count >= nic->eeprom_wc)
		return -EINVAL;

	for(addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += cpu_to_le16(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

#define E100_WAIT_SCB_TIMEOUT	20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST	20    /* delay like the old code */
static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(likely(!readb(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if(unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if(unlikely(cmd != cuc_resume))
		writel(dma_addr, &nic->csr->scb.gen_ptr);
	writeb(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
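
/* Rough worst-case arithmetic for the wait loop above (illustrative):
 * after E100_WAIT_SCB_FAST iterations of pure cpu_relax(), each of the
 * remaining ~20000 iterations adds a 5 us delay, i.e. roughly
 * 20000 * 5 us = 100 ms -- which is where the "wait 100ms" comment on
 * E100_WAIT_SCB_TIMEOUT comes from. */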

static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if(unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if(unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while(nic->cb_to_send != nic->cb_to_use) {
		if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if(err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}

static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;

	writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for(i = 0; i < 100; i++) {
		udelay(20);
		if((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}

	DPRINTK(HW, DEBUG,
		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
	return (u16)data_out;
}

static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
}

static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
	struct param_range cbs = { .min = 64, .max = 256, .count = 64 };

	pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id;
	if(nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = cpu_to_le16(cb_el);
	nic->blank_rfd.rbd = 0xFFFFFFFF;
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if(nic->mac >= mac_82559_D101M)
			config->tno_intr = 0x1;		/* TCO stats enable */
		else
			config->standard_stat_counter = 0x0;
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}

static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	int i;
	static const u32 ucode[UCODE_SIZE] = {
		/* NFS packets are misinterpreted as TCO packets and
		 * incorrectly routed to the BMC over SMBus.  This
		 * microcode patch checks the fragmented IP bit in the
		 * NFS/UDP header to distinguish between NFS and TCO. */
		0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF,
		0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000,
		0x00906EFD, 0x00900EFD, 0x00E00EF8,
	};

	if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
		for(i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode);
	} else
		cb->command = cpu_to_le16(cb_nop);
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Select the phy and isolate the rest */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
		/* enable/disable MDI/MDI-X auto-switching.
		   MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
		if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
		   (nic->mac == mac_82551_10) || (nic->mii.force_media) ||
		   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
			mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
		else
			mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
	}

	return 0;
}

static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	DPRINTK(HW, ERR, "e100_hw_init\n");
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_load_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct dev_mc_list *list = netdev->mc_list;
	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	for(i = 0; list && i < count; i++, list = list->next)
		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
			ETH_ALEN);
}

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
		netdev->mc_count, netdev->flags);

	if(netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if(netdev->flags & IFF_ALLMULTI ||
		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}

static void e100_update_stats(struct nic *nic)
{
	struct net_device_stats *ns = &nic->net_stats;
	struct stats *s = &nic->mem->stats;
	u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_dropped += le32_to_cpu(s->rx_resource_errors);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}

static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
	/* Adjust inter-frame-spacing (IFS) between two transmits if
	 * we're getting collisions on a half-duplex connection. */

	if(duplex == DUPLEX_HALF) {
		u32 prev = nic->adaptive_ifs;
		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

		if((nic->tx_frames / 32 < nic->tx_collisions) &&
		   (nic->tx_frames > min_frames)) {
			if(nic->adaptive_ifs < 60)
				nic->adaptive_ifs += 5;
		} else if (nic->tx_frames < min_frames) {
			if(nic->adaptive_ifs >= 5)
				nic->adaptive_ifs -= 5;
		}
		if(nic->adaptive_ifs != prev)
			e100_exec_cb(nic, NULL, e100_configure);
	}
}
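
/* Worked example of the heuristic above (illustrative numbers): on a
 * 100 Mbps half-duplex link, min_frames is 1000.  With tx_frames = 3200
 * and tx_collisions = 150, 3200 / 32 = 100 < 150, so adaptive_ifs is
 * bumped by 5 (capped at 60) and a configure command is queued to make
 * the new IFS take effect. */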

static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);

	if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
			cmd.speed == SPEED_100 ? "100" : "10",
			cmd.duplex == DUPLEX_FULL ? "full" : "half");
	} else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	spin_unlock_irq(&nic->cmd_lock);
	e100_write_flush(nic);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

	if(nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
}

static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	if((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}

static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if(nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if(e100_exec_cmd(nic, cuc_nop, 0))
			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch(err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return 1;
	}

	netdev->trans_start = jiffies;
	return 0;
}

static inline int e100_tx_clean(struct nic *nic)
{
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	DPRINTK(TX_DONE, DEBUG, "cb->status = 0x%04X\n",
		nic->cb_to_clean->status);

	/* Clean CBs marked complete */
	for(cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		if(likely(cb->skb != NULL)) {
			nic->net_stats.tx_packets++;
			nic->net_stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}

static void e100_clean_cbs(struct nic *nic)
{
	if(nic->cbs) {
		while(nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if(cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_free_consistent(nic->pdev,
			sizeof(struct cb) * nic->params.cbs.count,
			nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}

static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_alloc_consistent(nic->pdev,
		sizeof(struct cb) * count, &nic->cbs_dma_addr);
	if(!nic->cbs)
		return -ENOMEM;

	for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
		cb->skb = NULL;
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}
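
/* Note the two parallel linkages built above (a descriptive aside, not
 * new behavior): cb->next/cb->prev are virtual-address links the driver
 * walks, while cb->link carries the bus (DMA) address of the following
 * CB, which is the pointer the controller itself follows around the
 * ring. */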

static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
	if(!nic->rxs) return;
	if(RU_SUSPENDED != nic->ru_running) return;

	/* handle init time starts */
	if(!rx) rx = nic->rxs;

	/* (Re)start RU if suspended or idle and RFA is non-NULL */
	if(rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}

#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD. */
	rx->skb->dev = nic->netdev;
	skb_reserve(rx->skb, NET_IP_ALIGN);
	memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if(pci_dma_mapping_error(rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one, and clearing EL bit of previous. */
	if(rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned(cpu_to_le32(rx->dma_addr),
			(u32 *)&prev_rfd->link);
		wmb();
		prev_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_TODEVICE);
	}

	return 0;
}

static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if(unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_FROMDEVICE);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if(unlikely(!(rfd_status & cb_complete)))
		return -ENODATA;

	/* Get actual data size */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* this allows for a fast restart without re-enabling interrupts */
	if(le16_to_cpu(rfd->command) & cb_el)
		nic->ru_running = RU_SUSPENDED;

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if(unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		nic->net_stats.rx_dropped++;
		dev_kfree_skb_any(skb);
	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		nic->net_stats.rx_dropped++;
		dev_kfree_skb_any(skb);
	} else {
		nic->net_stats.rx_packets++;
		nic->net_stats.rx_bytes += actual_size;
		nic->netdev->last_rx = jiffies;
		netif_receive_skb(skb);
		if(work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}

static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0;
	struct rx *rx_to_start = NULL;

	/* are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if(RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Indicate newly arrived packets */
	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		if(-EAGAIN == err) {
			/* hit quota so have more work to do, restart once
			 * cleanup is complete */
			restart_required = 0;
			break;
		} else if(-ENODATA == err)
			break; /* No more to clean */
	}

	/* save our starting point as the place we'll restart the receiver */
	if(restart_required)
		rx_to_start = nic->rx_to_clean;

	/* Alloc new skbs to refill list */
	for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if(unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	if(restart_required) {
		/* ack the rnr? */
		writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, rx_to_start);
		if(work_done)
			(*work_done)++;
	}
}

static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if(nic->rxs) {
		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if(rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}

static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
		return -ENOMEM;
	memset(nic->rxs, 0, sizeof(struct rx) * count);

	for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if(e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}

static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = readb(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if(stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	writeb(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if(stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if(likely(netif_rx_schedule_prep(netdev))) {
		e100_disable_irq(nic);
		__netif_rx_schedule(netdev);
	}

	return IRQ_HANDLED;
}

static int e100_poll(struct net_device *netdev, int *budget)
{
	struct nic *nic = netdev_priv(netdev);
	unsigned int work_to_do = min(netdev->quota, *budget);
	unsigned int work_done = 0;
	int tx_cleaned;

	e100_rx_clean(nic, &work_done, work_to_do);
	tx_cleaned = e100_tx_clean(nic);

	/* If no Rx and Tx cleanup work was done, exit polling mode. */
	if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
		netif_rx_complete(netdev);
		e100_enable_irq(nic);
		return 0;
	}

	*budget -= work_done;
	netdev->quota -= work_done;

	return 1;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev, NULL);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif

static struct net_device_stats *e100_get_stats(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return &nic->net_stats;
}

static int e100_set_mac_address(struct net_device *netdev, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);

	return 0;
}

static int e100_change_mtu(struct net_device *netdev, int new_mtu)
{
	if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	netdev->mtu = new_mtu;
	return 0;
}

#ifdef CONFIG_PM
static int e100_asf(struct nic *nic)
{
	/* ASF can be enabled from eeprom */
	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
}
#endif

1757static int e100_up(struct nic *nic)
1758{
1759 int err;
1760
1761 if((err = e100_rx_alloc_list(nic)))
1762 return err;
1763 if((err = e100_alloc_cbs(nic)))
1764 goto err_rx_clean_list;
1765 if((err = e100_hw_init(nic)))
1766 goto err_clean_cbs;
1767 e100_set_multicast_list(nic->netdev);
1768	e100_start_receiver(nic, 0);
1769 mod_timer(&nic->watchdog, jiffies);
1770 if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
1771 nic->netdev->name, nic->netdev)))
1772 goto err_no_irq;
1773	netif_wake_queue(nic->netdev);
1774 netif_poll_enable(nic->netdev);
1775	/* Enable ints only _after_ enabling poll; otherwise we could race
1776	 * the ISR's disable-ints-then-schedule sequence */
1777 e100_enable_irq(nic);
1778 return 0;
1779
1780err_no_irq:
1781 del_timer_sync(&nic->watchdog);
1782err_clean_cbs:
1783 e100_clean_cbs(nic);
1784err_rx_clean_list:
1785 e100_rx_clean_list(nic);
1786 return err;
1787}
1788
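/* Take the interface down, roughly reversing e100_up(): wait for any
 * in-flight poll to finish, stop the queue, reset the hardware, then
 * release the IRQ, watchdog, and ring resources. */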
1789static void e100_down(struct nic *nic)
1790{
1791 /* wait here for poll to complete */
1792 netif_poll_disable(nic->netdev);
1793 netif_stop_queue(nic->netdev);
1794 e100_hw_reset(nic);
1795 free_irq(nic->pdev->irq, nic->netdev);
1796 del_timer_sync(&nic->watchdog);
1797 netif_carrier_off(nic->netdev);
1798 e100_clean_cbs(nic);
1799 e100_rx_clean_list(nic);
1800}
1801
1802static void e100_tx_timeout(struct net_device *netdev)
1803{
1804 struct nic *nic = netdev_priv(netdev);
1805
1806	/* Reset outside of interrupt context, since e100_up() calls
1807	 * request_irq(), which cannot be done in interrupt context */
1808 schedule_work(&nic->tx_timeout_task);
1809}
1810
1811static void e100_tx_timeout_task(struct net_device *netdev)
1812{
1813 struct nic *nic = netdev_priv(netdev);
1814
1815 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
1816 readb(&nic->csr->scb.status));
1817	e100_down(nic);
1818	e100_up(nic);
1819}
1820
1821static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
1822{
1823 int err;
1824 struct sk_buff *skb;
1825
1826 /* Use driver resources to perform internal MAC or PHY
1827 * loopback test. A single packet is prepared and transmitted
1828 * in loopback mode, and the test passes if the received
1829 * packet compares byte-for-byte to the transmitted packet. */
1830
1831 if((err = e100_rx_alloc_list(nic)))
1832 return err;
1833 if((err = e100_alloc_cbs(nic)))
1834 goto err_clean_rx;
1835
1836 /* ICH PHY loopback is broken so do MAC loopback instead */
1837 if(nic->flags & ich && loopback_mode == lb_phy)
1838 loopback_mode = lb_mac;
1839
1840 nic->loopback = loopback_mode;
1841 if((err = e100_hw_init(nic)))
1842 goto err_loopback_none;
1843
1844 if(loopback_mode == lb_phy)
1845 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
1846 BMCR_LOOPBACK);
1847
1848	e100_start_receiver(nic, 0);
1849
1850 if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
1851 err = -ENOMEM;
1852 goto err_loopback_none;
1853 }
1854 skb_put(skb, ETH_DATA_LEN);
1855 memset(skb->data, 0xFF, ETH_DATA_LEN);
1856 e100_xmit_frame(skb, nic->netdev);
1857
1858 msleep(10);
1859
1860 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
1861 skb->data, ETH_DATA_LEN))
1862 err = -EAGAIN;
1863
1864err_loopback_none:
1865 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
1866 nic->loopback = lb_none;
1867 e100_hw_init(nic);
1868 e100_clean_cbs(nic);
1869err_clean_rx:
1870 e100_rx_clean_list(nic);
1871 return err;
1872}
1873
1874#define MII_LED_CONTROL 0x1B
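/* Timer callback behind ethtool's LED-identify: toggle the LED via
 * the vendor-specific MII LED-control register and re-arm every
 * quarter second; pre-82559-D101M parts use a different "on" code. */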
1875static void e100_blink_led(unsigned long data)
1876{
1877 struct nic *nic = (struct nic *)data;
1878 enum led_state {
1879 led_on = 0x01,
1880 led_off = 0x04,
1881 led_on_559 = 0x05,
1882 led_on_557 = 0x07,
1883 };
1884
1885 nic->leds = (nic->leds & led_on) ? led_off :
1886 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
1887 mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
1888 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
1889}
1890
1891static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1892{
1893 struct nic *nic = netdev_priv(netdev);
1894 return mii_ethtool_gset(&nic->mii, cmd);
1895}
1896
1897static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1898{
1899 struct nic *nic = netdev_priv(netdev);
1900 int err;
1901
1902 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
1903 err = mii_ethtool_sset(&nic->mii, cmd);
1904 e100_exec_cb(nic, NULL, e100_configure);
1905
1906 return err;
1907}
1908
1909static void e100_get_drvinfo(struct net_device *netdev,
1910 struct ethtool_drvinfo *info)
1911{
1912 struct nic *nic = netdev_priv(netdev);
1913 strcpy(info->driver, DRV_NAME);
1914 strcpy(info->version, DRV_VERSION);
1915 strcpy(info->fw_version, "N/A");
1916 strcpy(info->bus_info, pci_name(nic->pdev));
1917}
1918
1919static int e100_get_regs_len(struct net_device *netdev)
1920{
1921 struct nic *nic = netdev_priv(netdev);
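	/* The dump written by e100_get_regs() is one word of SCB
	 * status/command, E100_PHY_REGS + 1 words of MDI registers (0x00
	 * through 0x1C inclusive), then the controller dump buffer. */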
1922#define E100_PHY_REGS 0x1C
1923#define E100_REGS_LEN (2 + E100_PHY_REGS + \
1924	sizeof(nic->mem->dump_buf) / sizeof(u32))
1925 return E100_REGS_LEN * sizeof(u32);
1926}
1927
1928static void e100_get_regs(struct net_device *netdev,
1929 struct ethtool_regs *regs, void *p)
1930{
1931 struct nic *nic = netdev_priv(netdev);
1932 u32 *buff = p;
1933 int i;
1934
1935 regs->version = (1 << 24) | nic->rev_id;
1936 buff[0] = readb(&nic->csr->scb.cmd_hi) << 24 |
1937 readb(&nic->csr->scb.cmd_lo) << 16 |
1938 readw(&nic->csr->scb.status);
1939 for(i = E100_PHY_REGS; i >= 0; i--)
1940 buff[1 + E100_PHY_REGS - i] =
1941 mdio_read(netdev, nic->mii.phy_id, i);
1942 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
1943 e100_exec_cb(nic, NULL, e100_dump);
1944 msleep(10);
1945 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
1946 sizeof(nic->mem->dump_buf));
1947}
1948
1949static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1950{
1951 struct nic *nic = netdev_priv(netdev);
1952 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
1953 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
1954}
1955
1956static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1957{
1958 struct nic *nic = netdev_priv(netdev);
1959
1960 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
1961 return -EOPNOTSUPP;
1962
1963 if(wol->wolopts)
1964 nic->flags |= wol_magic;
1965 else
1966 nic->flags &= ~wol_magic;
1967
1968 e100_exec_cb(nic, NULL, e100_configure);
1969
1970 return 0;
1971}
1972
1973static u32 e100_get_msglevel(struct net_device *netdev)
1974{
1975 struct nic *nic = netdev_priv(netdev);
1976 return nic->msg_enable;
1977}
1978
1979static void e100_set_msglevel(struct net_device *netdev, u32 value)
1980{
1981 struct nic *nic = netdev_priv(netdev);
1982 nic->msg_enable = value;
1983}
1984
1985static int e100_nway_reset(struct net_device *netdev)
1986{
1987 struct nic *nic = netdev_priv(netdev);
1988 return mii_nway_restart(&nic->mii);
1989}
1990
1991static u32 e100_get_link(struct net_device *netdev)
1992{
1993 struct nic *nic = netdev_priv(netdev);
1994 return mii_link_ok(&nic->mii);
1995}
1996
1997static int e100_get_eeprom_len(struct net_device *netdev)
1998{
1999 struct nic *nic = netdev_priv(netdev);
2000 return nic->eeprom_wc << 1;
2001}
2002
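/* ethtool EEPROM access works on the in-memory copy in nic->eeprom;
 * writes must present E100_EEPROM_MAGIC and are flushed back to the
 * part via e100_eeprom_save(). */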
2003#define E100_EEPROM_MAGIC 0x1234
2004static int e100_get_eeprom(struct net_device *netdev,
2005 struct ethtool_eeprom *eeprom, u8 *bytes)
2006{
2007 struct nic *nic = netdev_priv(netdev);
2008
2009 eeprom->magic = E100_EEPROM_MAGIC;
2010 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2011
2012 return 0;
2013}
2014
2015static int e100_set_eeprom(struct net_device *netdev,
2016 struct ethtool_eeprom *eeprom, u8 *bytes)
2017{
2018 struct nic *nic = netdev_priv(netdev);
2019
2020 if(eeprom->magic != E100_EEPROM_MAGIC)
2021 return -EINVAL;
2022
2023 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2024
2025 return e100_eeprom_save(nic, eeprom->offset >> 1,
2026 (eeprom->len >> 1) + 1);
2027}
2028
2029static void e100_get_ringparam(struct net_device *netdev,
2030 struct ethtool_ringparam *ring)
2031{
2032 struct nic *nic = netdev_priv(netdev);
2033 struct param_range *rfds = &nic->params.rfds;
2034 struct param_range *cbs = &nic->params.cbs;
2035
2036 ring->rx_max_pending = rfds->max;
2037 ring->tx_max_pending = cbs->max;
2038 ring->rx_mini_max_pending = 0;
2039 ring->rx_jumbo_max_pending = 0;
2040 ring->rx_pending = rfds->count;
2041 ring->tx_pending = cbs->count;
2042 ring->rx_mini_pending = 0;
2043 ring->rx_jumbo_pending = 0;
2044}
2045
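/* Clamp the requested Rx/Tx descriptor counts to the supported range
 * and, if the interface is running, bounce it so the new ring sizes
 * take effect. */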
2046static int e100_set_ringparam(struct net_device *netdev,
2047 struct ethtool_ringparam *ring)
2048{
2049 struct nic *nic = netdev_priv(netdev);
2050 struct param_range *rfds = &nic->params.rfds;
2051 struct param_range *cbs = &nic->params.cbs;
2052
2053 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2054 return -EINVAL;
2055
2056 if(netif_running(netdev))
2057 e100_down(nic);
2058 rfds->count = max(ring->rx_pending, rfds->min);
2059 rfds->count = min(rfds->count, rfds->max);
2060 cbs->count = max(ring->tx_pending, cbs->min);
2061 cbs->count = min(cbs->count, cbs->max);
2062 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
2063 rfds->count, cbs->count);
2064 if(netif_running(netdev))
2065 e100_up(nic);
2066
2067 return 0;
2068}
2069
2070static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2071 "Link test (on/offline)",
2072 "Eeprom test (on/offline)",
2073 "Self test (offline)",
2074 "Mac loopback (offline)",
2075 "Phy loopback (offline)",
2076};
2077#define E100_TEST_LEN (sizeof(e100_gstrings_test) / ETH_GSTRING_LEN)
2078
2079static int e100_diag_test_count(struct net_device *netdev)
2080{
2081 return E100_TEST_LEN;
2082}
2083
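/* ethtool self-test ("ethtool -t"): data[] holds the link, EEPROM,
 * self-test, MAC-loopback, and PHY-loopback results in that order.
 * The offline tests bounce the interface with link settings saved and
 * restored, and the trailing sleep gives the link time to come back
 * up afterwards. */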
2084static void e100_diag_test(struct net_device *netdev,
2085 struct ethtool_test *test, u64 *data)
2086{
2087 struct ethtool_cmd cmd;
2088 struct nic *nic = netdev_priv(netdev);
2089 int i, err;
2090
2091 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2092 data[0] = !mii_link_ok(&nic->mii);
2093 data[1] = e100_eeprom_load(nic);
2094 if(test->flags & ETH_TEST_FL_OFFLINE) {
2095
2096 /* save speed, duplex & autoneg settings */
2097 err = mii_ethtool_gset(&nic->mii, &cmd);
2098
2099 if(netif_running(netdev))
2100 e100_down(nic);
2101 data[2] = e100_self_test(nic);
2102 data[3] = e100_loopback_test(nic, lb_mac);
2103 data[4] = e100_loopback_test(nic, lb_phy);
2104
2105 /* restore speed, duplex & autoneg settings */
2106 err = mii_ethtool_sset(&nic->mii, &cmd);
2107
2108 if(netif_running(netdev))
2109 e100_up(nic);
2110 }
2111 for(i = 0; i < E100_TEST_LEN; i++)
2112 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2113
2114 msleep_interruptible(4 * 1000);
2115}
2116
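/* ethtool LED-identify: blink for the requested number of seconds
 * (bounded by MAX_SCHEDULE_TIMEOUT / HZ), then stop the blink timer
 * and return the LED to hardware control. */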
2117static int e100_phys_id(struct net_device *netdev, u32 data)
2118{
2119 struct nic *nic = netdev_priv(netdev);
2120
2121 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2122 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
2123 mod_timer(&nic->blink_timer, jiffies);
2124 msleep_interruptible(data * 1000);
2125 del_timer_sync(&nic->blink_timer);
2126 mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
2127
2128 return 0;
2129}
2130
2131static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2132 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2133 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2134 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2135 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2136 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2137 "tx_heartbeat_errors", "tx_window_errors",
2138 /* device-specific stats */
2139 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2140 "tx_flow_control_pause", "rx_flow_control_pause",
2141 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2142};
2143#define E100_NET_STATS_LEN 21
2144#define E100_STATS_LEN (sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN)
2145
2146static int e100_get_stats_count(struct net_device *netdev)
2147{
2148 return E100_STATS_LEN;
2149}
2150
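/* The first E100_NET_STATS_LEN values mirror struct net_device_stats
 * (stored as unsigned long); the driver-private counters follow in
 * e100_gstrings_stats order. */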
2151static void e100_get_ethtool_stats(struct net_device *netdev,
2152 struct ethtool_stats *stats, u64 *data)
2153{
2154 struct nic *nic = netdev_priv(netdev);
2155 int i;
2156
2157 for(i = 0; i < E100_NET_STATS_LEN; i++)
2158 data[i] = ((unsigned long *)&nic->net_stats)[i];
2159
2160 data[i++] = nic->tx_deferred;
2161 data[i++] = nic->tx_single_collisions;
2162 data[i++] = nic->tx_multiple_collisions;
2163 data[i++] = nic->tx_fc_pause;
2164 data[i++] = nic->rx_fc_pause;
2165 data[i++] = nic->rx_fc_unsupported;
2166 data[i++] = nic->tx_tco_frames;
2167 data[i++] = nic->rx_tco_frames;
2168}
2169
2170static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2171{
2172 switch(stringset) {
2173 case ETH_SS_TEST:
2174 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2175 break;
2176 case ETH_SS_STATS:
2177 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2178 break;
2179 }
2180}
2181
2182static struct ethtool_ops e100_ethtool_ops = {
2183 .get_settings = e100_get_settings,
2184 .set_settings = e100_set_settings,
2185 .get_drvinfo = e100_get_drvinfo,
2186 .get_regs_len = e100_get_regs_len,
2187 .get_regs = e100_get_regs,
2188 .get_wol = e100_get_wol,
2189 .set_wol = e100_set_wol,
2190 .get_msglevel = e100_get_msglevel,
2191 .set_msglevel = e100_set_msglevel,
2192 .nway_reset = e100_nway_reset,
2193 .get_link = e100_get_link,
2194 .get_eeprom_len = e100_get_eeprom_len,
2195 .get_eeprom = e100_get_eeprom,
2196 .set_eeprom = e100_set_eeprom,
2197 .get_ringparam = e100_get_ringparam,
2198 .set_ringparam = e100_set_ringparam,
2199 .self_test_count = e100_diag_test_count,
2200 .self_test = e100_diag_test,
2201 .get_strings = e100_get_strings,
2202 .phys_id = e100_phys_id,
2203 .get_stats_count = e100_get_stats_count,
2204 .get_ethtool_stats = e100_get_ethtool_stats,
2205};
2206
2207static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2208{
2209 struct nic *nic = netdev_priv(netdev);
2210
2211 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2212}
2213
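/* Allocate/free the coherent DMA block (struct mem) shared with the
 * controller; nic->dma_addr is the bus address handed to the device. */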
2214static int e100_alloc(struct nic *nic)
2215{
2216 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2217 &nic->dma_addr);
2218 return nic->mem ? 0 : -ENOMEM;
2219}
2220
2221static void e100_free(struct nic *nic)
2222{
2223 if(nic->mem) {
2224 pci_free_consistent(nic->pdev, sizeof(struct mem),
2225 nic->mem, nic->dma_addr);
2226 nic->mem = NULL;
2227 }
2228}
2229
2230static int e100_open(struct net_device *netdev)
2231{
2232 struct nic *nic = netdev_priv(netdev);
2233 int err = 0;
2234
2235 netif_carrier_off(netdev);
2236 if((err = e100_up(nic)))
2237 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2238 return err;
2239}
2240
2241static int e100_close(struct net_device *netdev)
2242{
2243 e100_down(netdev_priv(netdev));
2244 return 0;
2245}
2246
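/* PCI probe: allocate and wire up the net_device, enable the PCI
 * device and map the CSR from BAR 0 (memory space only), reset the
 * part before pci_set_master() in case an interrupt is pending, set
 * up the timers and Tx-timeout work, load the EEPROM for the MAC
 * address and WoL default, then register as eth%d. */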
2247static int __devinit e100_probe(struct pci_dev *pdev,
2248 const struct pci_device_id *ent)
2249{
2250 struct net_device *netdev;
2251 struct nic *nic;
2252 int err;
2253
2254 if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2255 if(((1 << debug) - 1) & NETIF_MSG_PROBE)
2256 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
2257 return -ENOMEM;
2258 }
2259
2260 netdev->open = e100_open;
2261 netdev->stop = e100_close;
2262 netdev->hard_start_xmit = e100_xmit_frame;
2263 netdev->get_stats = e100_get_stats;
2264 netdev->set_multicast_list = e100_set_multicast_list;
2265 netdev->set_mac_address = e100_set_mac_address;
2266 netdev->change_mtu = e100_change_mtu;
2267 netdev->do_ioctl = e100_do_ioctl;
2268 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
2269 netdev->tx_timeout = e100_tx_timeout;
2270 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2271 netdev->poll = e100_poll;
2272 netdev->weight = E100_NAPI_WEIGHT;
2273#ifdef CONFIG_NET_POLL_CONTROLLER
2274 netdev->poll_controller = e100_netpoll;
2275#endif
2276 strcpy(netdev->name, pci_name(pdev));
2277
2278 nic = netdev_priv(netdev);
2279 nic->netdev = netdev;
2280 nic->pdev = pdev;
2281 nic->msg_enable = (1 << debug) - 1;
2282 pci_set_drvdata(pdev, netdev);
2283
2284 if((err = pci_enable_device(pdev))) {
2285 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
2286 goto err_out_free_dev;
2287 }
2288
2289 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2290 DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
2291 "base address, aborting.\n");
2292 err = -ENODEV;
2293 goto err_out_disable_pdev;
2294 }
2295
2296 if((err = pci_request_regions(pdev, DRV_NAME))) {
2297 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
2298 goto err_out_disable_pdev;
2299 }
2300
2301	if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
2302 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
2303 goto err_out_free_res;
2304 }
2305
2306 SET_MODULE_OWNER(netdev);
2307 SET_NETDEV_DEV(netdev, &pdev->dev);
2308
2309 nic->csr = ioremap(pci_resource_start(pdev, 0), sizeof(struct csr));
2310 if(!nic->csr) {
2311 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
2312 err = -ENOMEM;
2313 goto err_out_free_res;
2314 }
2315
2316 if(ent->driver_data)
2317 nic->flags |= ich;
2318 else
2319 nic->flags &= ~ich;
2320
2321 e100_get_defaults(nic);
2322
2323	/* locks must be initialized before calling hw_reset */
2324 spin_lock_init(&nic->cb_lock);
2325 spin_lock_init(&nic->cmd_lock);
2326
2327 /* Reset the device before pci_set_master() in case device is in some
2328 * funky state and has an interrupt pending - hint: we don't have the
2329 * interrupt handler registered yet. */
2330 e100_hw_reset(nic);
2331
2332 pci_set_master(pdev);
2333
2334 init_timer(&nic->watchdog);
2335 nic->watchdog.function = e100_watchdog;
2336 nic->watchdog.data = (unsigned long)nic;
2337 init_timer(&nic->blink_timer);
2338 nic->blink_timer.function = e100_blink_led;
2339 nic->blink_timer.data = (unsigned long)nic;
2340
2341 INIT_WORK(&nic->tx_timeout_task,
2342 (void (*)(void *))e100_tx_timeout_task, netdev);
2343
2344 if((err = e100_alloc(nic))) {
2345 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2346 goto err_out_iounmap;
2347 }
2348
2349 if((err = e100_eeprom_load(nic)))
2350 goto err_out_free;
2351
2352 e100_phy_init(nic);
2353
2354 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
2355 if(!is_valid_ether_addr(netdev->dev_addr)) {
2356 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2357 "EEPROM, aborting.\n");
2358 err = -EAGAIN;
2359 goto err_out_free;
2360 }
2361
2362 /* Wol magic packet can be enabled from eeprom */
2363 if((nic->mac >= mac_82558_D101_A4) &&
2364 (nic->eeprom[eeprom_id] & eeprom_id_wol))
2365 nic->flags |= wol_magic;
2366
2367 /* ack any pending wake events, disable PME */
2368 pci_enable_wake(pdev, 0, 0);
2369
2370 strcpy(netdev->name, "eth%d");
2371 if((err = register_netdev(netdev))) {
2372 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2373 goto err_out_free;
2374 }
2375
2376 DPRINTK(PROBE, INFO, "addr 0x%lx, irq %d, "
2377 "MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
2378 pci_resource_start(pdev, 0), pdev->irq,
2379 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
2380 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
2381
2382 return 0;
2383
2384err_out_free:
2385 e100_free(nic);
2386err_out_iounmap:
2387 iounmap(nic->csr);
2388err_out_free_res:
2389 pci_release_regions(pdev);
2390err_out_disable_pdev:
2391 pci_disable_device(pdev);
2392err_out_free_dev:
2393 pci_set_drvdata(pdev, NULL);
2394 free_netdev(netdev);
2395 return err;
2396}
2397
2398static void __devexit e100_remove(struct pci_dev *pdev)
2399{
2400 struct net_device *netdev = pci_get_drvdata(pdev);
2401
2402 if(netdev) {
2403 struct nic *nic = netdev_priv(netdev);
2404 unregister_netdev(netdev);
2405 e100_free(nic);
2406 iounmap(nic->csr);
2407 free_netdev(netdev);
2408 pci_release_regions(pdev);
2409 pci_disable_device(pdev);
2410 pci_set_drvdata(pdev, NULL);
2411 }
2412}
2413
2414#ifdef CONFIG_PM
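/* Power management: suspend takes a running interface down, arms PME
 * from the WoL/ASF configuration, and enters the PCI state chosen by
 * the core; resume restores PCI state, re-inits the hardware, and
 * brings the interface back up. */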
2415static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2416{
2417 struct net_device *netdev = pci_get_drvdata(pdev);
2418 struct nic *nic = netdev_priv(netdev);
2419
2420 if(netif_running(netdev))
2421 e100_down(nic);
2422 e100_hw_reset(nic);
2423 netif_device_detach(netdev);
2424
2425 pci_save_state(pdev);
2426 pci_enable_wake(pdev, pci_choose_state(pdev, state), nic->flags & (wol_magic | e100_asf(nic)));
2427 pci_disable_device(pdev);
2428 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2429
2430 return 0;
2431}
2432
2433static int e100_resume(struct pci_dev *pdev)
2434{
2435 struct net_device *netdev = pci_get_drvdata(pdev);
2436 struct nic *nic = netdev_priv(netdev);
2437
2438 pci_set_power_state(pdev, PCI_D0);
2439 pci_restore_state(pdev);
2440 /* ack any pending wake events, disable PME */
2441 pci_enable_wake(pdev, 0, 0);
2442 if(e100_hw_init(nic))
2443 DPRINTK(HW, ERR, "e100_hw_init failed\n");
2444
2445 netif_device_attach(netdev);
2446 if(netif_running(netdev))
2447 e100_up(nic);
2448
2449 return 0;
2450}
2451#endif
2452
2453
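/* Reboot/poweroff hook: arm PME so a WoL magic packet (or ASF, when
 * PM support is built in) can still wake the machine. */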
2454static void e100_shutdown(struct pci_dev *pdev)
2455{
2456 struct net_device *netdev = pci_get_drvdata(pdev);
2457 struct nic *nic = netdev_priv(netdev);
2458
2459#ifdef CONFIG_PM
2460 pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
2461#else
2462 pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
2463#endif
2464}
2465
2466
2467static struct pci_driver e100_driver = {
2468 .name = DRV_NAME,
2469 .id_table = e100_id_table,
2470 .probe = e100_probe,
2471 .remove = __devexit_p(e100_remove),
2472#ifdef CONFIG_PM
2473 .suspend = e100_suspend,
2474 .resume = e100_resume,
2475#endif
2476	.shutdown = e100_shutdown,
2477};
2478
2479static int __init e100_init_module(void)
2480{
2481 if(((1 << debug) - 1) & NETIF_MSG_DRV) {
2482 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2483 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
2484 }
2485 return pci_module_init(&e100_driver);
2486}
2487
2488static void __exit e100_cleanup_module(void)
2489{
2490 pci_unregister_driver(&e100_driver);
2491}
2492
2493module_init(e100_init_module);
2494module_exit(e100_cleanup_module);