[NET]: Nuke SET_MODULE_OWNER macro.
[deliverable/linux.git] / drivers / net / eepro100.c
1 /* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
2 /*
3 Written 1996-1999 by Donald Becker.
4
5 The driver also contains updates by different kernel developers
6 (see incomplete list below).
7 Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
8 Please use this email address and linux-kernel mailing list for bug reports.
9
10 This software may be used and distributed according to the terms
11 of the GNU General Public License, incorporated herein by reference.
12
13 This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
14 It should work with all i82557/558/559 boards.
15
16 Version history:
17 1998 Apr - 2000 Feb Andrey V. Savochkin <saw@saw.sw.com.sg>
18 Serious fixes for multicast filter list setting, TX timeout routine;
19 RX ring refilling logic; other stuff
20 2000 Feb Jeff Garzik <jgarzik@pobox.com>
21 Convert to new PCI driver interface
22 2000 Mar 24 Dragan Stancevic <visitor@valinux.com>
23 Disabled FC and ER, to avoid lockups when when we get FCP interrupts.
24 2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
25 PCI DMA API fixes, adding pci_dma_sync_single calls where neccesary
26 2000 Aug 31 David Mosberger <davidm@hpl.hp.com>
27 rx_align support: enables rx DMA without causing unaligned accesses.
28 */
29
30 static const char * const version =
31 "eepro100.c:v1.09j-t 9/29/99 Donald Becker\n"
32 "eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
33
34 /* A few user-configurable values that apply to all boards.
35 First set is undocumented and spelled per Intel recommendations. */
36
37 static int congenb /* = 0 */; /* Enable congestion control in the DP83840. */
38 static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
39 static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
40 /* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
41 static int txdmacount = 128;
42 static int rxdmacount /* = 0 */;
43
44 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
45 defined(__arm__)
46 /* align rx buffers to 2 bytes so that IP header is aligned */
47 # define rx_align(skb) skb_reserve((skb), 2)
48 # define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed))
49 #else
50 # define rx_align(skb)
51 # define RxFD_ALIGNMENT
52 #endif
53
54 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
55 Lower values use more memory, but are faster. */
56 static int rx_copybreak = 200;
57
58 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
59 static int max_interrupt_work = 20;
60
61 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
62 static int multicast_filter_limit = 64;
63
64 /* 'options' is used to pass a transceiver override or full-duplex flag
65 e.g. "options=16" for FD, "options=32" for 100mbps-only. */
66 static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
67 static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
68
69 /* A few values that may be tweaked. */
70 /* The ring sizes should be a power of two for efficiency. */
71 #define TX_RING_SIZE 64
72 #define RX_RING_SIZE 64
73 /* How much slots multicast filter setup may take.
74 Do not descrease without changing set_rx_mode() implementaion. */
75 #define TX_MULTICAST_SIZE 2
76 #define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
77 /* Actual number of TX packets queued, must be
78 <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
79 #define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
80 /* Hysteresis marking queue as no longer full. */
81 #define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
82
83 /* Operational parameters that usually are not changed. */
84
85 /* Time in jiffies before concluding the transmitter is hung. */
86 #define TX_TIMEOUT (2*HZ)
87 /* Size of an pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
88 #define PKT_BUF_SZ 1536
89
90 #include <linux/module.h>
91
92 #include <linux/kernel.h>
93 #include <linux/string.h>
94 #include <linux/errno.h>
95 #include <linux/ioport.h>
96 #include <linux/slab.h>
97 #include <linux/interrupt.h>
98 #include <linux/timer.h>
99 #include <linux/pci.h>
100 #include <linux/spinlock.h>
101 #include <linux/init.h>
102 #include <linux/mii.h>
103 #include <linux/delay.h>
104 #include <linux/bitops.h>
105
106 #include <asm/io.h>
107 #include <asm/uaccess.h>
108 #include <asm/irq.h>
109
110 #include <linux/netdevice.h>
111 #include <linux/etherdevice.h>
112 #include <linux/rtnetlink.h>
113 #include <linux/skbuff.h>
114 #include <linux/ethtool.h>
115
116 static int use_io;
117 static int debug = -1;
118 #define DEBUG_DEFAULT (NETIF_MSG_DRV | \
119 NETIF_MSG_HW | \
120 NETIF_MSG_RX_ERR | \
121 NETIF_MSG_TX_ERR)
122 #define DEBUG ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
123
124
125 MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
126 MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
127 MODULE_LICENSE("GPL");
128 module_param(use_io, int, 0);
129 module_param(debug, int, 0);
130 module_param_array(options, int, NULL, 0);
131 module_param_array(full_duplex, int, NULL, 0);
132 module_param(congenb, int, 0);
133 module_param(txfifo, int, 0);
134 module_param(rxfifo, int, 0);
135 module_param(txdmacount, int, 0);
136 module_param(rxdmacount, int, 0);
137 module_param(rx_copybreak, int, 0);
138 module_param(max_interrupt_work, int, 0);
139 module_param(multicast_filter_limit, int, 0);
140 MODULE_PARM_DESC(debug, "debug level (0-6)");
141 MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
142 MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
143 MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
144 MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
145 MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
146 MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
147 MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
148 MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
149 MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
150 MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
151
152 #define RUN_AT(x) (jiffies + (x))
153
154 #define netdevice_start(dev)
155 #define netdevice_stop(dev)
156 #define netif_set_tx_timeout(dev, tf, tm) \
157 do { \
158 (dev)->tx_timeout = (tf); \
159 (dev)->watchdog_timeo = (tm); \
160 } while(0)
161
162
163
164 /*
165 Theory of Operation
166
167 I. Board Compatibility
168
169 This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
170 single-chip fast Ethernet controller for PCI, as used on the Intel
171 EtherExpress Pro 100 adapter.
172
173 II. Board-specific settings
174
175 PCI bus devices are configured by the system at boot time, so no jumpers
176 need to be set on the board. The system BIOS should be set to assign the
177 PCI INTA signal to an otherwise unused system IRQ line. While it's
178 possible to share PCI interrupt lines, it negatively impacts performance and
179 only recent kernels support it.
180
181 III. Driver operation
182
183 IIIA. General
184 The Speedo3 is very similar to other Intel network chips, that is to say
185 "apparently designed on a different planet". This chips retains the complex
186 Rx and Tx descriptors and multiple buffers pointers as previous chips, but
187 also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
188 Tx mode, but in a simplified lower-overhead manner: it associates only a
189 single buffer descriptor with each frame descriptor.
190
191 Despite the extra space overhead in each receive skbuff, the driver must use
192 the simplified Rx buffer mode to assure that only a single data buffer is
193 associated with each RxFD. The driver implements this by reserving space
194 for the Rx descriptor at the head of each Rx skbuff.
195
196 The Speedo-3 has receive and command unit base addresses that are added to
197 almost all descriptor pointers. The driver sets these to zero, so that all
198 pointer fields are absolute addresses.
199
200 The System Control Block (SCB) of some previous Intel chips exists on the
201 chip in both PCI I/O and memory space. This driver uses the I/O space
202 registers, but might switch to memory mapped mode to better support non-x86
203 processors.
204
205 IIIB. Transmit structure
206
207 The driver must use the complex Tx command+descriptor mode in order to
208 have a indirect pointer to the skbuff data section. Each Tx command block
209 (TxCB) is associated with two immediately appended Tx Buffer Descriptor
210 (TxBD). A fixed ring of these TxCB+TxBD pairs are kept as part of the
211 speedo_private data structure for each adapter instance.
212
213 The newer i82558 explicitly supports this structure, and can read the two
214 TxBDs in the same PCI burst as the TxCB.
215
216 This ring structure is used for all normal transmit packets, but the
217 transmit packet descriptors aren't long enough for most non-Tx commands such
218 as CmdConfigure. This is complicated by the possibility that the chip has
219 already loaded the link address in the previous descriptor. So for these
220 commands we convert the next free descriptor on the ring to a NoOp, and point
221 that descriptor's link to the complex command.
222
223 An additional complexity of these non-transmit commands are that they may be
224 added asynchronous to the normal transmit queue, so we disable interrupts
225 whenever the Tx descriptor ring is manipulated.
226
227 A notable aspect of these special configure commands is that they do
228 work with the normal Tx ring entry scavenge method. The Tx ring scavenge
229 is done at interrupt time using the 'dirty_tx' index, and checking for the
230 command-complete bit. While the setup frames may have the NoOp command on the
231 Tx ring marked as complete, but not have completed the setup command, this
232 is not a problem. The tx_ring entry can be still safely reused, as the
233 tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
234
235 Commands may have bits set e.g. CmdSuspend in the command word to either
236 suspend or stop the transmit/command unit. This driver always flags the last
237 command with CmdSuspend, erases the CmdSuspend in the previous command, and
238 then issues a CU_RESUME.
239 Note: Watch out for the potential race condition here: imagine
240 erasing the previous suspend
241 the chip processes the previous command
242 the chip processes the final command, and suspends
243 doing the CU_RESUME
244 the chip processes the next-yet-valid post-final-command.
245 So blindly sending a CU_RESUME is only safe if we do it immediately after
246 after erasing the previous CmdSuspend, without the possibility of an
247 intervening delay. Thus the resume command is always within the
248 interrupts-disabled region. This is a timing dependence, but handling this
249 condition in a timing-independent way would considerably complicate the code.
250
251 Note: In previous generation Intel chips, restarting the command unit was a
252 notoriously slow process. This is presumably no longer true.
253
254 IIIC. Receive structure
255
256 Because of the bus-master support on the Speedo3 this driver uses the new
257 SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
258 This scheme allocates full-sized skbuffs as receive buffers. The value
259 SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
260 trade-off the memory wasted by passing the full-sized skbuff to the queue
261 layer for all frames vs. the copying cost of copying a frame to a
262 correctly-sized skbuff.
263
264 For small frames the copying cost is negligible (esp. considering that we
265 are pre-loading the cache with immediately useful header information), so we
266 allocate a new, minimally-sized skbuff. For large frames the copying cost
267 is non-trivial, and the larger copy might flush the cache of useful data, so
268 we pass up the skbuff the packet was received into.
269
270 IV. Notes
271
272 Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
273 that stated that I could disclose the information. But I still resent
274 having to sign an Intel NDA when I'm helping Intel sell their own product!
275
276 */
277
278 static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);
279
280 /* Offsets to the various registers.
281 All accesses need not be longword aligned. */
282 enum speedo_offsets {
283 SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
284 SCBIntmask = 3,
285 SCBPointer = 4, /* General purpose pointer. */
286 SCBPort = 8, /* Misc. commands and operands. */
287 SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
288 SCBCtrlMDI = 16, /* MDI interface control. */
289 SCBEarlyRx = 20, /* Early receive byte count. */
290 };
291 /* Commands that can be put in a command list entry. */
292 enum commands {
293 CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
294 CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
295 CmdDump = 0x60000, CmdDiagnose = 0x70000,
296 CmdSuspend = 0x40000000, /* Suspend after completion. */
297 CmdIntr = 0x20000000, /* Interrupt after completion. */
298 CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
299 };
300 /* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
301 status bits. Previous driver versions used separate 16 bit fields for
302 commands and statuses. --SAW
303 */
304 #if defined(__alpha__)
305 # define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status);
306 #else
307 # if defined(__LITTLE_ENDIAN)
308 # define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
309 # elif defined(__BIG_ENDIAN)
310 # define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
311 # else
312 # error Unsupported byteorder
313 # endif
314 #endif
315
316 enum SCBCmdBits {
317 SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
318 SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
319 SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
320 /* The rest are Rx and Tx commands. */
321 CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
322 CUCmdBase=0x0060, /* CU Base address (set to zero) . */
323 CUDumpStats=0x0070, /* Dump then reset stats counters. */
324 RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
325 RxResumeNoResources=0x0007,
326 };
327
328 enum SCBPort_cmds {
329 PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
330 };
331
332 /* The Speedo3 Rx and Tx frame/buffer descriptors. */
333 struct descriptor { /* A generic descriptor. */
334 volatile s32 cmd_status; /* All command and status fields. */
335 u32 link; /* struct descriptor * */
336 unsigned char params[0];
337 };
338
339 /* The Speedo3 Rx and Tx buffer descriptors. */
340 struct RxFD { /* Receive frame descriptor. */
341 volatile s32 status;
342 u32 link; /* struct RxFD * */
343 u32 rx_buf_addr; /* void * */
344 u32 count;
345 } RxFD_ALIGNMENT;
346
347 /* Selected elements of the Tx/RxFD.status word. */
348 enum RxFD_bits {
349 RxComplete=0x8000, RxOK=0x2000,
350 RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
351 RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
352 TxUnderrun=0x1000, StatusComplete=0x8000,
353 };
354
355 #define CONFIG_DATA_SIZE 22
356 struct TxFD { /* Transmit frame descriptor set. */
357 s32 status;
358 u32 link; /* void * */
359 u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
360 s32 count; /* # of TBD (=1), Tx start thresh., etc. */
361 /* This constitutes two "TBD" entries -- we only use one. */
362 #define TX_DESCR_BUF_OFFSET 16
363 u32 tx_buf_addr0; /* void *, frame to be transmitted. */
364 s32 tx_buf_size0; /* Length of Tx frame. */
365 u32 tx_buf_addr1; /* void *, frame to be transmitted. */
366 s32 tx_buf_size1; /* Length of Tx frame. */
367 /* the structure must have space for at least CONFIG_DATA_SIZE starting
368 * from tx_desc_addr field */
369 };
370
371 /* Multicast filter setting block. --SAW */
372 struct speedo_mc_block {
373 struct speedo_mc_block *next;
374 unsigned int tx;
375 dma_addr_t frame_dma;
376 unsigned int len;
377 struct descriptor frame __attribute__ ((__aligned__(16)));
378 };
379
380 /* Elements of the dump_statistics block. This block must be lword aligned. */
381 struct speedo_stats {
382 u32 tx_good_frames;
383 u32 tx_coll16_errs;
384 u32 tx_late_colls;
385 u32 tx_underruns;
386 u32 tx_lost_carrier;
387 u32 tx_deferred;
388 u32 tx_one_colls;
389 u32 tx_multi_colls;
390 u32 tx_total_colls;
391 u32 rx_good_frames;
392 u32 rx_crc_errs;
393 u32 rx_align_errs;
394 u32 rx_resource_errs;
395 u32 rx_overrun_errs;
396 u32 rx_colls_errs;
397 u32 rx_runt_errs;
398 u32 done_marker;
399 };
400
401 enum Rx_ring_state_bits {
402 RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
403 };
404
405 /* Do not change the position (alignment) of the first few elements!
406 The later elements are grouped for cache locality.
407
408 Unfortunately, all the positions have been shifted since there.
409 A new re-alignment is required. 2000/03/06 SAW */
410 struct speedo_private {
411 void __iomem *regs;
412 struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */
413 struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
414 /* The addresses of a Tx/Rx-in-place packets/buffers. */
415 struct sk_buff *tx_skbuff[TX_RING_SIZE];
416 struct sk_buff *rx_skbuff[RX_RING_SIZE];
417 /* Mapped addresses of the rings. */
418 dma_addr_t tx_ring_dma;
419 #define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
420 dma_addr_t rx_ring_dma[RX_RING_SIZE];
421 struct descriptor *last_cmd; /* Last command sent. */
422 unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
423 spinlock_t lock; /* Group with Tx control cache line. */
424 u32 tx_threshold; /* The value for txdesc.count. */
425 struct RxFD *last_rxf; /* Last filled RX buffer. */
426 dma_addr_t last_rxf_dma;
427 unsigned int cur_rx, dirty_rx; /* The next free ring entry */
428 long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
429 struct net_device_stats stats;
430 struct speedo_stats *lstats;
431 dma_addr_t lstats_dma;
432 int chip_id;
433 struct pci_dev *pdev;
434 struct timer_list timer; /* Media selection timer. */
435 struct speedo_mc_block *mc_setup_head; /* Multicast setup frame list head. */
436 struct speedo_mc_block *mc_setup_tail; /* Multicast setup frame list tail. */
437 long in_interrupt; /* Word-aligned dev->interrupt */
438 unsigned char acpi_pwr;
439 signed char rx_mode; /* Current PROMISC/ALLMULTI setting. */
440 unsigned int tx_full:1; /* The Tx queue is full. */
441 unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
442 unsigned int rx_bug:1; /* Work around receiver hang errata. */
443 unsigned char default_port:8; /* Last dev->if_port value. */
444 unsigned char rx_ring_state; /* RX ring status flags. */
445 unsigned short phy[2]; /* PHY media interfaces available. */
446 unsigned short partner; /* Link partner caps. */
447 struct mii_if_info mii_if; /* MII API hooks, info */
448 u32 msg_enable; /* debug message level */
449 };
450
451 /* The parameters for a CmdConfigure operation.
452 There are so many options that it would be difficult to document each bit.
453 We mostly use the default or recommended settings. */
454 static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
455 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
456 0, 0x2E, 0, 0x60, 0,
457 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
458 0x3f, 0x05, };
459 static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
460 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
461 0, 0x2E, 0, 0x60, 0x08, 0x88,
462 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
463 0x31, 0x05, };
464
465 /* PHY media interface chips. */
466 static const char * const phys[] = {
467 "None", "i82553-A/B", "i82553-C", "i82503",
468 "DP83840", "80c240", "80c24", "i82555",
469 "unknown-8", "unknown-9", "DP83840A", "unknown-11",
470 "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
471 enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
472 S80C24, I82555, DP83840A=10, };
473 static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
474 #define EE_READ_CMD (6)
475
476 static int eepro100_init_one(struct pci_dev *pdev,
477 const struct pci_device_id *ent);
478
479 static int do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len);
480 static int mdio_read(struct net_device *dev, int phy_id, int location);
481 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
482 static int speedo_open(struct net_device *dev);
483 static void speedo_resume(struct net_device *dev);
484 static void speedo_timer(unsigned long data);
485 static void speedo_init_rx_ring(struct net_device *dev);
486 static void speedo_tx_timeout(struct net_device *dev);
487 static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
488 static void speedo_refill_rx_buffers(struct net_device *dev, int force);
489 static int speedo_rx(struct net_device *dev);
490 static void speedo_tx_buffer_gc(struct net_device *dev);
491 static irqreturn_t speedo_interrupt(int irq, void *dev_instance);
492 static int speedo_close(struct net_device *dev);
493 static struct net_device_stats *speedo_get_stats(struct net_device *dev);
494 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
495 static void set_rx_mode(struct net_device *dev);
496 static void speedo_show_state(struct net_device *dev);
497 static const struct ethtool_ops ethtool_ops;
498
499
500
501 #ifdef honor_default_port
502 /* Optional driver feature to allow forcing the transceiver setting.
503 Not recommended. */
504 static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
505 0x2000, 0x2100, 0x0400, 0x3100};
506 #endif
507
508 /* How to wait for the command unit to accept a command.
509 Typically this takes 0 ticks. */
510 static inline unsigned char wait_for_cmd_done(struct net_device *dev,
511 struct speedo_private *sp)
512 {
513 int wait = 1000;
514 void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
515 unsigned char r;
516
517 do {
518 udelay(1);
519 r = ioread8(cmd_ioaddr);
520 } while(r && --wait >= 0);
521
522 if (wait < 0)
523 printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
524 return r;
525 }
526
527 static int __devinit eepro100_init_one (struct pci_dev *pdev,
528 const struct pci_device_id *ent)
529 {
530 void __iomem *ioaddr;
531 int irq, pci_bar;
532 int acpi_idle_state = 0, pm;
533 static int cards_found /* = 0 */;
534 unsigned long pci_base;
535
536 #ifndef MODULE
537 /* when built-in, we only print version if device is found */
538 static int did_version;
539 if (did_version++ == 0)
540 printk(version);
541 #endif
542
543 /* save power state before pci_enable_device overwrites it */
544 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
545 if (pm) {
546 u16 pwr_command;
547 pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
548 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
549 }
550
551 if (pci_enable_device(pdev))
552 goto err_out_free_mmio_region;
553
554 pci_set_master(pdev);
555
556 if (!request_region(pci_resource_start(pdev, 1),
557 pci_resource_len(pdev, 1), "eepro100")) {
558 dev_err(&pdev->dev, "eepro100: cannot reserve I/O ports\n");
559 goto err_out_none;
560 }
561 if (!request_mem_region(pci_resource_start(pdev, 0),
562 pci_resource_len(pdev, 0), "eepro100")) {
563 dev_err(&pdev->dev, "eepro100: cannot reserve MMIO region\n");
564 goto err_out_free_pio_region;
565 }
566
567 irq = pdev->irq;
568 pci_bar = use_io ? 1 : 0;
569 pci_base = pci_resource_start(pdev, pci_bar);
570 if (DEBUG & NETIF_MSG_PROBE)
571 printk("Found Intel i82557 PCI Speedo at %#lx, IRQ %d.\n",
572 pci_base, irq);
573
574 ioaddr = pci_iomap(pdev, pci_bar, 0);
575 if (!ioaddr) {
576 dev_err(&pdev->dev, "eepro100: cannot remap IO\n");
577 goto err_out_free_mmio_region;
578 }
579
580 if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
581 cards_found++;
582 else
583 goto err_out_iounmap;
584
585 return 0;
586
587 err_out_iounmap: ;
588 pci_iounmap(pdev, ioaddr);
589 err_out_free_mmio_region:
590 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
591 err_out_free_pio_region:
592 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
593 err_out_none:
594 return -ENODEV;
595 }
596
597 #ifdef CONFIG_NET_POLL_CONTROLLER
598 /*
599 * Polling 'interrupt' - used by things like netconsole to send skbs
600 * without having to re-enable interrupts. It's not called while
601 * the interrupt routine is executing.
602 */
603
604 static void poll_speedo (struct net_device *dev)
605 {
606 /* disable_irq is not very nice, but with the funny lockless design
607 we have no other choice. */
608 disable_irq(dev->irq);
609 speedo_interrupt (dev->irq, dev);
610 enable_irq(dev->irq);
611 }
612 #endif
613
614 static int __devinit speedo_found1(struct pci_dev *pdev,
615 void __iomem *ioaddr, int card_idx, int acpi_idle_state)
616 {
617 struct net_device *dev;
618 struct speedo_private *sp;
619 const char *product;
620 int i, option;
621 u16 eeprom[0x100];
622 int size;
623 void *tx_ring_space;
624 dma_addr_t tx_ring_dma;
625
626 size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
627 tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
628 if (tx_ring_space == NULL)
629 return -1;
630
631 dev = alloc_etherdev(sizeof(struct speedo_private));
632 if (dev == NULL) {
633 printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
634 pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
635 return -1;
636 }
637
638 SET_NETDEV_DEV(dev, &pdev->dev);
639
640 if (dev->mem_start > 0)
641 option = dev->mem_start;
642 else if (card_idx >= 0 && options[card_idx] >= 0)
643 option = options[card_idx];
644 else
645 option = 0;
646
647 rtnl_lock();
648 if (dev_alloc_name(dev, dev->name) < 0)
649 goto err_free_unlock;
650
651 /* Read the station address EEPROM before doing the reset.
652 Nominally his should even be done before accepting the device, but
653 then we wouldn't have a device name with which to report the error.
654 The size test is for 6 bit vs. 8 bit address serial EEPROMs.
655 */
656 {
657 void __iomem *iobase;
658 int read_cmd, ee_size;
659 u16 sum;
660 int j;
661
662 /* Use IO only to avoid postponed writes and satisfy EEPROM timing
663 requirements. */
664 iobase = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
665 if (!iobase)
666 goto err_free_unlock;
667 if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
668 == 0xffe0000) {
669 ee_size = 0x100;
670 read_cmd = EE_READ_CMD << 24;
671 } else {
672 ee_size = 0x40;
673 read_cmd = EE_READ_CMD << 22;
674 }
675
676 for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
677 u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
678 eeprom[i] = value;
679 sum += value;
680 if (i < 3) {
681 dev->dev_addr[j++] = value;
682 dev->dev_addr[j++] = value >> 8;
683 }
684 }
685 if (sum != 0xBABA)
686 printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
687 "check settings before activating this device!\n",
688 dev->name, sum);
689 /* Don't unregister_netdev(dev); as the EEPro may actually be
690 usable, especially if the MAC address is set later.
691 On the other hand, it may be unusable if MDI data is corrupted. */
692
693 pci_iounmap(pdev, iobase);
694 }
695
696 /* Reset the chip: stop Tx and Rx processes and clear counters.
697 This takes less than 10usec and will easily finish before the next
698 action. */
699 iowrite32(PortReset, ioaddr + SCBPort);
700 ioread32(ioaddr + SCBPort);
701 udelay(10);
702
703 if (eeprom[3] & 0x0100)
704 product = "OEM i82557/i82558 10/100 Ethernet";
705 else
706 product = pci_name(pdev);
707
708 printk(KERN_INFO "%s: %s, ", dev->name, product);
709
710 for (i = 0; i < 5; i++)
711 printk("%2.2X:", dev->dev_addr[i]);
712 printk("%2.2X, ", dev->dev_addr[i]);
713 printk("IRQ %d.\n", pdev->irq);
714
715 sp = netdev_priv(dev);
716
717 /* we must initialize this early, for mdio_{read,write} */
718 sp->regs = ioaddr;
719
720 #if 1 || defined(kernel_bloat)
721 /* OK, this is pure kernel bloat. I don't like it when other drivers
722 waste non-pageable kernel space to emit similar messages, but I need
723 them for bug reports. */
724 {
725 const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
726 /* The self-test results must be paragraph aligned. */
727 volatile s32 *self_test_results;
728 int boguscnt = 16000; /* Timeout for set-test. */
729 if ((eeprom[3] & 0x03) != 0x03)
730 printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
731 " work-around.\n");
732 printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
733 " connectors present:",
734 eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
735 for (i = 0; i < 4; i++)
736 if (eeprom[5] & (1<<i))
737 printk(connectors[i]);
738 printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
739 phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
740 if (eeprom[7] & 0x0700)
741 printk(KERN_INFO " Secondary interface chip %s.\n",
742 phys[(eeprom[7]>>8)&7]);
743 if (((eeprom[6]>>8) & 0x3f) == DP83840
744 || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
745 int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
746 if (congenb)
747 mdi_reg23 |= 0x0100;
748 printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
749 mdi_reg23);
750 mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
751 }
752 if ((option >= 0) && (option & 0x70)) {
753 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
754 (option & 0x20 ? 100 : 10),
755 (option & 0x10 ? "full" : "half"));
756 mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
757 ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
758 ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
759 }
760
761 /* Perform a system self-test. */
762 self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
763 self_test_results[0] = 0;
764 self_test_results[1] = -1;
765 iowrite32(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
766 do {
767 udelay(10);
768 } while (self_test_results[1] == -1 && --boguscnt >= 0);
769
770 if (boguscnt < 0) { /* Test optimized out. */
771 printk(KERN_ERR "Self test failed, status %8.8x:\n"
772 KERN_ERR " Failure to initialize the i82557.\n"
773 KERN_ERR " Verify that the card is a bus-master"
774 " capable slot.\n",
775 self_test_results[1]);
776 } else
777 printk(KERN_INFO " General self-test: %s.\n"
778 KERN_INFO " Serial sub-system self-test: %s.\n"
779 KERN_INFO " Internal registers self-test: %s.\n"
780 KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
781 self_test_results[1] & 0x1000 ? "failed" : "passed",
782 self_test_results[1] & 0x0020 ? "failed" : "passed",
783 self_test_results[1] & 0x0008 ? "failed" : "passed",
784 self_test_results[1] & 0x0004 ? "failed" : "passed",
785 self_test_results[0]);
786 }
787 #endif /* kernel_bloat */
788
789 iowrite32(PortReset, ioaddr + SCBPort);
790 ioread32(ioaddr + SCBPort);
791 udelay(10);
792
793 /* Return the chip to its original power state. */
794 pci_set_power_state(pdev, acpi_idle_state);
795
796 pci_set_drvdata (pdev, dev);
797 SET_NETDEV_DEV(dev, &pdev->dev);
798
799 dev->irq = pdev->irq;
800
801 sp->pdev = pdev;
802 sp->msg_enable = DEBUG;
803 sp->acpi_pwr = acpi_idle_state;
804 sp->tx_ring = tx_ring_space;
805 sp->tx_ring_dma = tx_ring_dma;
806 sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
807 sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
808 init_timer(&sp->timer); /* used in ioctl() */
809 spin_lock_init(&sp->lock);
810
811 sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
812 if (card_idx >= 0) {
813 if (full_duplex[card_idx] >= 0)
814 sp->mii_if.full_duplex = full_duplex[card_idx];
815 }
816 sp->default_port = option >= 0 ? (option & 0x0f) : 0;
817
818 sp->phy[0] = eeprom[6];
819 sp->phy[1] = eeprom[7];
820
821 sp->mii_if.phy_id = eeprom[6] & 0x1f;
822 sp->mii_if.phy_id_mask = 0x1f;
823 sp->mii_if.reg_num_mask = 0x1f;
824 sp->mii_if.dev = dev;
825 sp->mii_if.mdio_read = mdio_read;
826 sp->mii_if.mdio_write = mdio_write;
827
828 sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
829 if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
830 || (pdev->device == 0x2449) || (pdev->device == 0x2459)
831 || (pdev->device == 0x245D)) {
832 sp->chip_id = 1;
833 }
834
835 if (sp->rx_bug)
836 printk(KERN_INFO " Receiver lock-up workaround activated.\n");
837
838 /* The Speedo-specific entries in the device structure. */
839 dev->open = &speedo_open;
840 dev->hard_start_xmit = &speedo_start_xmit;
841 netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
842 dev->stop = &speedo_close;
843 dev->get_stats = &speedo_get_stats;
844 dev->set_multicast_list = &set_rx_mode;
845 dev->do_ioctl = &speedo_ioctl;
846 SET_ETHTOOL_OPS(dev, &ethtool_ops);
847 #ifdef CONFIG_NET_POLL_CONTROLLER
848 dev->poll_controller = &poll_speedo;
849 #endif
850
851 if (register_netdevice(dev))
852 goto err_free_unlock;
853 rtnl_unlock();
854
855 return 0;
856
857 err_free_unlock:
858 rtnl_unlock();
859 free_netdev(dev);
860 return -1;
861 }
862
863 static void do_slow_command(struct net_device *dev, struct speedo_private *sp, int cmd)
864 {
865 void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
866 int wait = 0;
867 do
868 if (ioread8(cmd_ioaddr) == 0) break;
869 while(++wait <= 200);
870 if (wait > 100)
871 printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
872 ioread8(cmd_ioaddr), wait);
873
874 iowrite8(cmd, cmd_ioaddr);
875
876 for (wait = 0; wait <= 100; wait++)
877 if (ioread8(cmd_ioaddr) == 0) return;
878 for (; wait <= 20000; wait++)
879 if (ioread8(cmd_ioaddr) == 0) return;
880 else udelay(1);
881 printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
882 " Current status %8.8x.\n",
883 cmd, wait, ioread32(sp->regs + SCBStatus));
884 }
885
886 /* Serial EEPROM section.
887 A "bit" grungy, but we work our way through bit-by-bit :->. */
888 /* EEPROM_Ctrl bits. */
889 #define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
890 #define EE_CS 0x02 /* EEPROM chip select. */
891 #define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
892 #define EE_DATA_READ 0x08 /* EEPROM chip data out. */
893 #define EE_ENB (0x4800 | EE_CS)
894 #define EE_WRITE_0 0x4802
895 #define EE_WRITE_1 0x4806
896 #define EE_OFFSET SCBeeprom
897
898 /* The fixes for the code were kindly provided by Dragan Stancevic
899 <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
900 access timing.
901 The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
902 interval for serial EEPROM. However, it looks like that there is an
903 additional requirement dictating larger udelay's in the code below.
904 2000/05/24 SAW */
905 static int __devinit do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len)
906 {
907 unsigned retval = 0;
908 void __iomem *ee_addr = ioaddr + SCBeeprom;
909
910 iowrite16(EE_ENB, ee_addr); udelay(2);
911 iowrite16(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
912
913 /* Shift the command bits out. */
914 do {
915 short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
916 iowrite16(dataval, ee_addr); udelay(2);
917 iowrite16(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
918 retval = (retval << 1) | ((ioread16(ee_addr) & EE_DATA_READ) ? 1 : 0);
919 } while (--cmd_len >= 0);
920 iowrite16(EE_ENB, ee_addr); udelay(2);
921
922 /* Terminate the EEPROM access. */
923 iowrite16(EE_ENB & ~EE_CS, ee_addr);
924 return retval;
925 }
926
927 static int mdio_read(struct net_device *dev, int phy_id, int location)
928 {
929 struct speedo_private *sp = netdev_priv(dev);
930 void __iomem *ioaddr = sp->regs;
931 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
932 iowrite32(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
933 do {
934 val = ioread32(ioaddr + SCBCtrlMDI);
935 if (--boguscnt < 0) {
936 printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
937 break;
938 }
939 } while (! (val & 0x10000000));
940 return val & 0xffff;
941 }
942
943 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
944 {
945 struct speedo_private *sp = netdev_priv(dev);
946 void __iomem *ioaddr = sp->regs;
947 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
948 iowrite32(0x04000000 | (location<<16) | (phy_id<<21) | value,
949 ioaddr + SCBCtrlMDI);
950 do {
951 val = ioread32(ioaddr + SCBCtrlMDI);
952 if (--boguscnt < 0) {
953 printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
954 break;
955 }
956 } while (! (val & 0x10000000));
957 }
958
959 static int
960 speedo_open(struct net_device *dev)
961 {
962 struct speedo_private *sp = netdev_priv(dev);
963 void __iomem *ioaddr = sp->regs;
964 int retval;
965
966 if (netif_msg_ifup(sp))
967 printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
968
969 pci_set_power_state(sp->pdev, PCI_D0);
970
971 /* Set up the Tx queue early.. */
972 sp->cur_tx = 0;
973 sp->dirty_tx = 0;
974 sp->last_cmd = NULL;
975 sp->tx_full = 0;
976 sp->in_interrupt = 0;
977
978 /* .. we can safely take handler calls during init. */
979 retval = request_irq(dev->irq, &speedo_interrupt, IRQF_SHARED, dev->name, dev);
980 if (retval) {
981 return retval;
982 }
983
984 dev->if_port = sp->default_port;
985
986 #ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
987 /* Retrigger negotiation to reset previous errors. */
988 if ((sp->phy[0] & 0x8000) == 0) {
989 int phy_addr = sp->phy[0] & 0x1f ;
990 /* Use 0x3300 for restarting NWay, other values to force xcvr:
991 0x0000 10-HD
992 0x0100 10-FD
993 0x2000 100-HD
994 0x2100 100-FD
995 */
996 #ifdef honor_default_port
997 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
998 #else
999 mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
1000 #endif
1001 }
1002 #endif
1003
1004 speedo_init_rx_ring(dev);
1005
1006 /* Fire up the hardware. */
1007 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1008 speedo_resume(dev);
1009
1010 netdevice_start(dev);
1011 netif_start_queue(dev);
1012
1013 /* Setup the chip and configure the multicast list. */
1014 sp->mc_setup_head = NULL;
1015 sp->mc_setup_tail = NULL;
1016 sp->flow_ctrl = sp->partner = 0;
1017 sp->rx_mode = -1; /* Invalid -> always reset the mode. */
1018 set_rx_mode(dev);
1019 if ((sp->phy[0] & 0x8000) == 0)
1020 sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);
1021
1022 mii_check_link(&sp->mii_if);
1023
1024 if (netif_msg_ifup(sp)) {
1025 printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
1026 dev->name, ioread16(ioaddr + SCBStatus));
1027 }
1028
1029 /* Set the timer. The timer serves a dual purpose:
1030 1) to monitor the media interface (e.g. link beat) and perhaps switch
1031 to an alternate media type
1032 2) to monitor Rx activity, and restart the Rx process if the receiver
1033 hangs. */
1034 sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
1035 sp->timer.data = (unsigned long)dev;
1036 sp->timer.function = &speedo_timer; /* timer handler */
1037 add_timer(&sp->timer);
1038
1039 /* No need to wait for the command unit to accept here. */
1040 if ((sp->phy[0] & 0x8000) == 0)
1041 mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);
1042
1043 return 0;
1044 }
1045
1046 /* Start the chip hardware after a full reset. */
1047 static void speedo_resume(struct net_device *dev)
1048 {
1049 struct speedo_private *sp = netdev_priv(dev);
1050 void __iomem *ioaddr = sp->regs;
1051
1052 /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
1053 sp->tx_threshold = 0x01208000;
1054
1055 /* Set the segment registers to '0'. */
1056 if (wait_for_cmd_done(dev, sp) != 0) {
1057 iowrite32(PortPartialReset, ioaddr + SCBPort);
1058 udelay(10);
1059 }
1060
1061 iowrite32(0, ioaddr + SCBPointer);
1062 ioread32(ioaddr + SCBPointer); /* Flush to PCI. */
1063 udelay(10); /* Bogus, but it avoids the bug. */
1064
1065 /* Note: these next two operations can take a while. */
1066 do_slow_command(dev, sp, RxAddrLoad);
1067 do_slow_command(dev, sp, CUCmdBase);
1068
1069 /* Load the statistics block and rx ring addresses. */
1070 iowrite32(sp->lstats_dma, ioaddr + SCBPointer);
1071 ioread32(ioaddr + SCBPointer); /* Flush to PCI */
1072
1073 iowrite8(CUStatsAddr, ioaddr + SCBCmd);
1074 sp->lstats->done_marker = 0;
1075 wait_for_cmd_done(dev, sp);
1076
1077 if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
1078 if (netif_msg_rx_err(sp))
1079 printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
1080 dev->name);
1081 } else {
1082 iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1083 ioaddr + SCBPointer);
1084 ioread32(ioaddr + SCBPointer); /* Flush to PCI */
1085 }
1086
1087 /* Note: RxStart should complete instantly. */
1088 do_slow_command(dev, sp, RxStart);
1089 do_slow_command(dev, sp, CUDumpStats);
1090
1091 /* Fill the first command with our physical address. */
1092 {
1093 struct descriptor *ias_cmd;
1094
1095 ias_cmd =
1096 (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
1097 /* Avoid a bug(?!) here by marking the command already completed. */
1098 ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
1099 ias_cmd->link =
1100 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1101 memcpy(ias_cmd->params, dev->dev_addr, 6);
1102 if (sp->last_cmd)
1103 clear_suspend(sp->last_cmd);
1104 sp->last_cmd = ias_cmd;
1105 }
1106
1107 /* Start the chip's Tx process and unmask interrupts. */
1108 iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1109 ioaddr + SCBPointer);
1110 /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
1111 remain masked --Dragan */
1112 iowrite16(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
1113 }
1114
1115 /*
1116 * Sometimes the receiver stops making progress. This routine knows how to
1117 * get it going again, without losing packets or being otherwise nasty like
1118 * a chip reset would be. Previously the driver had a whole sequence
1119 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
1120 * do another, etc. But those things don't really matter. Separate logic
1121 * in the ISR provides for allocating buffers--the other half of operation
1122 * is just making sure the receiver is active. speedo_rx_soft_reset does that.
1123 * This problem with the old, more involved algorithm is shown up under
1124 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
1125 */
1126 static void
1127 speedo_rx_soft_reset(struct net_device *dev)
1128 {
1129 struct speedo_private *sp = netdev_priv(dev);
1130 struct RxFD *rfd;
1131 void __iomem *ioaddr;
1132
1133 ioaddr = sp->regs;
1134 if (wait_for_cmd_done(dev, sp) != 0) {
1135 printk("%s: previous command stalled\n", dev->name);
1136 return;
1137 }
1138 /*
1139 * Put the hardware into a known state.
1140 */
1141 iowrite8(RxAbort, ioaddr + SCBCmd);
1142
1143 rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
1144
1145 rfd->rx_buf_addr = 0xffffffff;
1146
1147 if (wait_for_cmd_done(dev, sp) != 0) {
1148 printk("%s: RxAbort command stalled\n", dev->name);
1149 return;
1150 }
1151 iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1152 ioaddr + SCBPointer);
1153 iowrite8(RxStart, ioaddr + SCBCmd);
1154 }
1155
1156
1157 /* Media monitoring and control. */
1158 static void speedo_timer(unsigned long data)
1159 {
1160 struct net_device *dev = (struct net_device *)data;
1161 struct speedo_private *sp = netdev_priv(dev);
1162 void __iomem *ioaddr = sp->regs;
1163 int phy_num = sp->phy[0] & 0x1f;
1164
1165 /* We have MII and lost link beat. */
1166 if ((sp->phy[0] & 0x8000) == 0) {
1167 int partner = mdio_read(dev, phy_num, MII_LPA);
1168 if (partner != sp->partner) {
1169 int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
1170 if (netif_msg_link(sp)) {
1171 printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
1172 printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
1173 dev->name, sp->partner, partner, sp->mii_if.advertising);
1174 }
1175 sp->partner = partner;
1176 if (flow_ctrl != sp->flow_ctrl) {
1177 sp->flow_ctrl = flow_ctrl;
1178 sp->rx_mode = -1; /* Trigger a reload. */
1179 }
1180 }
1181 }
1182 mii_check_link(&sp->mii_if);
1183 if (netif_msg_timer(sp)) {
1184 printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
1185 dev->name, ioread16(ioaddr + SCBStatus));
1186 }
1187 if (sp->rx_mode < 0 ||
1188 (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
1189 /* We haven't received a packet in a Long Time. We might have been
1190 bitten by the receiver hang bug. This can be cleared by sending
1191 a set multicast list command. */
1192 if (netif_msg_timer(sp))
1193 printk(KERN_DEBUG "%s: Sending a multicast list set command"
1194 " from a timer routine,"
1195 " m=%d, j=%ld, l=%ld.\n",
1196 dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
1197 set_rx_mode(dev);
1198 }
1199 /* We must continue to monitor the media. */
1200 sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
1201 add_timer(&sp->timer);
1202 }
1203
1204 static void speedo_show_state(struct net_device *dev)
1205 {
1206 struct speedo_private *sp = netdev_priv(dev);
1207 int i;
1208
1209 if (netif_msg_pktdata(sp)) {
1210 printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n",
1211 dev->name, sp->cur_tx, sp->dirty_tx);
1212 for (i = 0; i < TX_RING_SIZE; i++)
1213 printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
1214 i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
1215 i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
1216 i, sp->tx_ring[i].status);
1217
1218 printk(KERN_DEBUG "%s: Printing Rx ring"
1219 " (next to receive into %u, dirty index %u).\n",
1220 dev->name, sp->cur_rx, sp->dirty_rx);
1221 for (i = 0; i < RX_RING_SIZE; i++)
1222 printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
1223 sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
1224 i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
1225 i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
1226 i, (sp->rx_ringp[i] != NULL) ?
1227 (unsigned)sp->rx_ringp[i]->status : 0);
1228 }
1229
1230 #if 0
1231 {
1232 void __iomem *ioaddr = sp->regs;
1233 int phy_num = sp->phy[0] & 0x1f;
1234 for (i = 0; i < 16; i++) {
1235 /* FIXME: what does it mean? --SAW */
1236 if (i == 6) i = 21;
1237 printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
1238 dev->name, phy_num, i, mdio_read(dev, phy_num, i));
1239 }
1240 }
1241 #endif
1242
1243 }
1244
1245 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1246 static void
1247 speedo_init_rx_ring(struct net_device *dev)
1248 {
1249 struct speedo_private *sp = netdev_priv(dev);
1250 struct RxFD *rxf, *last_rxf = NULL;
1251 dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
1252 int i;
1253
1254 sp->cur_rx = 0;
1255
1256 for (i = 0; i < RX_RING_SIZE; i++) {
1257 struct sk_buff *skb;
1258 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1259 if (skb)
1260 rx_align(skb); /* Align IP on 16 byte boundary */
1261 sp->rx_skbuff[i] = skb;
1262 if (skb == NULL)
1263 break; /* OK. Just initially short of Rx bufs. */
1264 skb->dev = dev; /* Mark as being used by this device. */
1265 rxf = (struct RxFD *)skb->data;
1266 sp->rx_ringp[i] = rxf;
1267 sp->rx_ring_dma[i] =
1268 pci_map_single(sp->pdev, rxf,
1269 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
1270 skb_reserve(skb, sizeof(struct RxFD));
1271 if (last_rxf) {
1272 last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
1273 pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
1274 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1275 }
1276 last_rxf = rxf;
1277 last_rxf_dma = sp->rx_ring_dma[i];
1278 rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
1279 rxf->link = 0; /* None yet. */
1280 /* This field unused by i82557. */
1281 rxf->rx_buf_addr = 0xffffffff;
1282 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1283 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
1284 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1285 }
1286 sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1287 /* Mark the last entry as end-of-list. */
1288 last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
1289 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
1290 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1291 sp->last_rxf = last_rxf;
1292 sp->last_rxf_dma = last_rxf_dma;
1293 }
1294
1295 static void speedo_purge_tx(struct net_device *dev)
1296 {
1297 struct speedo_private *sp = netdev_priv(dev);
1298 int entry;
1299
1300 while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
1301 entry = sp->dirty_tx % TX_RING_SIZE;
1302 if (sp->tx_skbuff[entry]) {
1303 sp->stats.tx_errors++;
1304 pci_unmap_single(sp->pdev,
1305 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1306 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1307 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1308 sp->tx_skbuff[entry] = NULL;
1309 }
1310 sp->dirty_tx++;
1311 }
1312 while (sp->mc_setup_head != NULL) {
1313 struct speedo_mc_block *t;
1314 if (netif_msg_tx_err(sp))
1315 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1316 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1317 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1318 t = sp->mc_setup_head->next;
1319 kfree(sp->mc_setup_head);
1320 sp->mc_setup_head = t;
1321 }
1322 sp->mc_setup_tail = NULL;
1323 sp->tx_full = 0;
1324 netif_wake_queue(dev);
1325 }
1326
1327 static void reset_mii(struct net_device *dev)
1328 {
1329 struct speedo_private *sp = netdev_priv(dev);
1330
1331 /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
1332 if ((sp->phy[0] & 0x8000) == 0) {
1333 int phy_addr = sp->phy[0] & 0x1f;
1334 int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
1335 int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
1336 mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
1337 mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
1338 mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
1339 mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
1340 #ifdef honor_default_port
1341 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1342 #else
1343 mdio_read(dev, phy_addr, MII_BMCR);
1344 mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
1345 mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
1346 #endif
1347 }
1348 }
1349
1350 static void speedo_tx_timeout(struct net_device *dev)
1351 {
1352 struct speedo_private *sp = netdev_priv(dev);
1353 void __iomem *ioaddr = sp->regs;
1354 int status = ioread16(ioaddr + SCBStatus);
1355 unsigned long flags;
1356
1357 if (netif_msg_tx_err(sp)) {
1358 printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
1359 " %4.4x at %d/%d command %8.8x.\n",
1360 dev->name, status, ioread16(ioaddr + SCBCmd),
1361 sp->dirty_tx, sp->cur_tx,
1362 sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
1363
1364 }
1365 speedo_show_state(dev);
1366 #if 0
1367 if ((status & 0x00C0) != 0x0080
1368 && (status & 0x003C) == 0x0010) {
1369 /* Only the command unit has stopped. */
1370 printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
1371 dev->name);
1372 iowrite32(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE]),
1373 ioaddr + SCBPointer);
1374 iowrite16(CUStart, ioaddr + SCBCmd);
1375 reset_mii(dev);
1376 } else {
1377 #else
1378 {
1379 #endif
1380 del_timer_sync(&sp->timer);
1381 /* Reset the Tx and Rx units. */
1382 iowrite32(PortReset, ioaddr + SCBPort);
1383 /* We may get spurious interrupts here. But I don't think that they
1384 may do much harm. 1999/12/09 SAW */
1385 udelay(10);
1386 /* Disable interrupts. */
1387 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1388 synchronize_irq(dev->irq);
1389 speedo_tx_buffer_gc(dev);
1390 /* Free as much as possible.
1391 It helps to recover from a hang because of out-of-memory.
1392 It also simplifies speedo_resume() in case TX ring is full or
1393 close-to-be full. */
1394 speedo_purge_tx(dev);
1395 speedo_refill_rx_buffers(dev, 1);
1396 spin_lock_irqsave(&sp->lock, flags);
1397 speedo_resume(dev);
1398 sp->rx_mode = -1;
1399 dev->trans_start = jiffies;
1400 spin_unlock_irqrestore(&sp->lock, flags);
1401 set_rx_mode(dev); /* it takes the spinlock itself --SAW */
1402 /* Reset MII transceiver. Do it before starting the timer to serialize
1403 mdio_xxx operations. Yes, it's a paranoya :-) 2000/05/09 SAW */
1404 reset_mii(dev);
1405 sp->timer.expires = RUN_AT(2*HZ);
1406 add_timer(&sp->timer);
1407 }
1408 return;
1409 }
1410
1411 static int
1412 speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
1413 {
1414 struct speedo_private *sp = netdev_priv(dev);
1415 void __iomem *ioaddr = sp->regs;
1416 int entry;
1417
1418 /* Prevent interrupts from changing the Tx ring from underneath us. */
1419 unsigned long flags;
1420
1421 spin_lock_irqsave(&sp->lock, flags);
1422
1423 /* Check if there are enough space. */
1424 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1425 printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
1426 netif_stop_queue(dev);
1427 sp->tx_full = 1;
1428 spin_unlock_irqrestore(&sp->lock, flags);
1429 return 1;
1430 }
1431
1432 /* Calculate the Tx descriptor entry. */
1433 entry = sp->cur_tx++ % TX_RING_SIZE;
1434
1435 sp->tx_skbuff[entry] = skb;
1436 sp->tx_ring[entry].status =
1437 cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
1438 if (!(entry & ((TX_RING_SIZE>>2)-1)))
1439 sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
1440 sp->tx_ring[entry].link =
1441 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1442 sp->tx_ring[entry].tx_desc_addr =
1443 cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
1444 /* The data region is always in one buffer descriptor. */
1445 sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
1446 sp->tx_ring[entry].tx_buf_addr0 =
1447 cpu_to_le32(pci_map_single(sp->pdev, skb->data,
1448 skb->len, PCI_DMA_TODEVICE));
1449 sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
1450
1451 /* workaround for hardware bug on 10 mbit half duplex */
1452
1453 if ((sp->partner == 0) && (sp->chip_id == 1)) {
1454 wait_for_cmd_done(dev, sp);
1455 iowrite8(0 , ioaddr + SCBCmd);
1456 udelay(1);
1457 }
1458
1459 /* Trigger the command unit resume. */
1460 wait_for_cmd_done(dev, sp);
1461 clear_suspend(sp->last_cmd);
1462 /* We want the time window between clearing suspend flag on the previous
1463 command and resuming CU to be as small as possible.
1464 Interrupts in between are very undesired. --SAW */
1465 iowrite8(CUResume, ioaddr + SCBCmd);
1466 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1467
1468 /* Leave room for set_rx_mode(). If there is no more space than reserved
1469 for multicast filter mark the ring as full. */
1470 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1471 netif_stop_queue(dev);
1472 sp->tx_full = 1;
1473 }
1474
1475 spin_unlock_irqrestore(&sp->lock, flags);
1476
1477 dev->trans_start = jiffies;
1478
1479 return 0;
1480 }
1481
1482 static void speedo_tx_buffer_gc(struct net_device *dev)
1483 {
1484 unsigned int dirty_tx;
1485 struct speedo_private *sp = netdev_priv(dev);
1486
1487 dirty_tx = sp->dirty_tx;
1488 while ((int)(sp->cur_tx - dirty_tx) > 0) {
1489 int entry = dirty_tx % TX_RING_SIZE;
1490 int status = le32_to_cpu(sp->tx_ring[entry].status);
1491
1492 if (netif_msg_tx_done(sp))
1493 printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1494 entry, status);
1495 if ((status & StatusComplete) == 0)
1496 break; /* It still hasn't been processed. */
1497 if (status & TxUnderrun)
1498 if (sp->tx_threshold < 0x01e08000) {
1499 if (netif_msg_tx_err(sp))
1500 printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
1501 dev->name);
1502 sp->tx_threshold += 0x00040000;
1503 }
1504 /* Free the original skb. */
1505 if (sp->tx_skbuff[entry]) {
1506 sp->stats.tx_packets++; /* Count only user packets. */
1507 sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1508 pci_unmap_single(sp->pdev,
1509 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1510 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1511 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1512 sp->tx_skbuff[entry] = NULL;
1513 }
1514 dirty_tx++;
1515 }
1516
1517 if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
1518 printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1519 " full=%d.\n",
1520 dirty_tx, sp->cur_tx, sp->tx_full);
1521 dirty_tx += TX_RING_SIZE;
1522 }
1523
1524 while (sp->mc_setup_head != NULL
1525 && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
1526 struct speedo_mc_block *t;
1527 if (netif_msg_tx_err(sp))
1528 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1529 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1530 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1531 t = sp->mc_setup_head->next;
1532 kfree(sp->mc_setup_head);
1533 sp->mc_setup_head = t;
1534 }
1535 if (sp->mc_setup_head == NULL)
1536 sp->mc_setup_tail = NULL;
1537
1538 sp->dirty_tx = dirty_tx;
1539 }
1540
1541 /* The interrupt handler does all of the Rx thread work and cleans up
1542 after the Tx thread. */
1543 static irqreturn_t speedo_interrupt(int irq, void *dev_instance)
1544 {
1545 struct net_device *dev = (struct net_device *)dev_instance;
1546 struct speedo_private *sp;
1547 void __iomem *ioaddr;
1548 long boguscnt = max_interrupt_work;
1549 unsigned short status;
1550 unsigned int handled = 0;
1551
1552 sp = netdev_priv(dev);
1553 ioaddr = sp->regs;
1554
1555 #ifndef final_version
1556 /* A lock to prevent simultaneous entry on SMP machines. */
1557 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1558 printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1559 dev->name);
1560 sp->in_interrupt = 0; /* Avoid halting machine. */
1561 return IRQ_NONE;
1562 }
1563 #endif
1564
1565 do {
1566 status = ioread16(ioaddr + SCBStatus);
1567 /* Acknowledge all of the current interrupt sources ASAP. */
1568 /* Will change from 0xfc00 to 0xff00 when we start handling
1569 FCP and ER interrupts --Dragan */
1570 iowrite16(status & 0xfc00, ioaddr + SCBStatus);
1571
1572 if (netif_msg_intr(sp))
1573 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
1574 dev->name, status);
1575
1576 if ((status & 0xfc00) == 0)
1577 break;
1578 handled = 1;
1579
1580
1581 if ((status & 0x5000) || /* Packet received, or Rx error. */
1582 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1583 /* Need to gather the postponed packet. */
1584 speedo_rx(dev);
1585
1586 /* Always check if all rx buffers are allocated. --SAW */
1587 speedo_refill_rx_buffers(dev, 0);
1588
1589 spin_lock(&sp->lock);
1590 /*
1591 * The chip may have suspended reception for various reasons.
1592 * Check for that, and re-prime it should this be the case.
1593 */
1594 switch ((status >> 2) & 0xf) {
1595 case 0: /* Idle */
1596 break;
1597 case 1: /* Suspended */
1598 case 2: /* No resources (RxFDs) */
1599 case 9: /* Suspended with no more RBDs */
1600 case 10: /* No resources due to no RBDs */
1601 case 12: /* Ready with no RBDs */
1602 speedo_rx_soft_reset(dev);
1603 break;
1604 case 3: case 5: case 6: case 7: case 8:
1605 case 11: case 13: case 14: case 15:
1606 /* these are all reserved values */
1607 break;
1608 }
1609
1610
1611 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1612 if (status & 0xA400) {
1613 speedo_tx_buffer_gc(dev);
1614 if (sp->tx_full
1615 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1616 /* The ring is no longer full. */
1617 sp->tx_full = 0;
1618 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1619 }
1620 }
1621
1622 spin_unlock(&sp->lock);
1623
1624 if (--boguscnt < 0) {
1625 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1626 dev->name, status);
1627 /* Clear all interrupt sources. */
1628 /* Will change from 0xfc00 to 0xff00 when we start handling
1629 FCP and ER interrupts --Dragan */
1630 iowrite16(0xfc00, ioaddr + SCBStatus);
1631 break;
1632 }
1633 } while (1);
1634
1635 if (netif_msg_intr(sp))
1636 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1637 dev->name, ioread16(ioaddr + SCBStatus));
1638
1639 clear_bit(0, (void*)&sp->in_interrupt);
1640 return IRQ_RETVAL(handled);
1641 }
1642
1643 static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1644 {
1645 struct speedo_private *sp = netdev_priv(dev);
1646 struct RxFD *rxf;
1647 struct sk_buff *skb;
1648 /* Get a fresh skbuff to replace the consumed one. */
1649 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1650 if (skb)
1651 rx_align(skb); /* Align IP on 16 byte boundary */
1652 sp->rx_skbuff[entry] = skb;
1653 if (skb == NULL) {
1654 sp->rx_ringp[entry] = NULL;
1655 return NULL;
1656 }
1657 rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
1658 sp->rx_ring_dma[entry] =
1659 pci_map_single(sp->pdev, rxf,
1660 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1661 skb->dev = dev;
1662 skb_reserve(skb, sizeof(struct RxFD));
1663 rxf->rx_buf_addr = 0xffffffff;
1664 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1665 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1666 return rxf;
1667 }
1668
1669 static inline void speedo_rx_link(struct net_device *dev, int entry,
1670 struct RxFD *rxf, dma_addr_t rxf_dma)
1671 {
1672 struct speedo_private *sp = netdev_priv(dev);
1673 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1674 rxf->link = 0; /* None yet. */
1675 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1676 sp->last_rxf->link = cpu_to_le32(rxf_dma);
1677 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1678 pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
1679 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1680 sp->last_rxf = rxf;
1681 sp->last_rxf_dma = rxf_dma;
1682 }
1683
1684 static int speedo_refill_rx_buf(struct net_device *dev, int force)
1685 {
1686 struct speedo_private *sp = netdev_priv(dev);
1687 int entry;
1688 struct RxFD *rxf;
1689
1690 entry = sp->dirty_rx % RX_RING_SIZE;
1691 if (sp->rx_skbuff[entry] == NULL) {
1692 rxf = speedo_rx_alloc(dev, entry);
1693 if (rxf == NULL) {
1694 unsigned int forw;
1695 int forw_entry;
1696 if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1697 printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1698 dev->name, force);
1699 sp->rx_ring_state |= RrOOMReported;
1700 }
1701 speedo_show_state(dev);
1702 if (!force)
1703 return -1; /* Better luck next time! */
1704 /* Borrow an skb from one of next entries. */
1705 for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1706 if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1707 break;
1708 if (forw == sp->cur_rx)
1709 return -1;
1710 forw_entry = forw % RX_RING_SIZE;
1711 sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1712 sp->rx_skbuff[forw_entry] = NULL;
1713 rxf = sp->rx_ringp[forw_entry];
1714 sp->rx_ringp[forw_entry] = NULL;
1715 sp->rx_ringp[entry] = rxf;
1716 }
1717 } else {
1718 rxf = sp->rx_ringp[entry];
1719 }
1720 speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1721 sp->dirty_rx++;
1722 sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
1723 return 0;
1724 }
1725
1726 static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1727 {
1728 struct speedo_private *sp = netdev_priv(dev);
1729
1730 /* Refill the RX ring. */
1731 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1732 speedo_refill_rx_buf(dev, force) != -1);
1733 }
1734
1735 static int
1736 speedo_rx(struct net_device *dev)
1737 {
1738 struct speedo_private *sp = netdev_priv(dev);
1739 int entry = sp->cur_rx % RX_RING_SIZE;
1740 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1741 int alloc_ok = 1;
1742 int npkts = 0;
1743
1744 if (netif_msg_intr(sp))
1745 printk(KERN_DEBUG " In speedo_rx().\n");
1746 /* If we own the next entry, it's a new packet. Send it up. */
1747 while (sp->rx_ringp[entry] != NULL) {
1748 int status;
1749 int pkt_len;
1750
1751 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1752 sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1753 status = le32_to_cpu(sp->rx_ringp[entry]->status);
1754 pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1755
1756 if (!(status & RxComplete))
1757 break;
1758
1759 if (--rx_work_limit < 0)
1760 break;
1761
1762 /* Check for a rare out-of-memory case: the current buffer is
1763 the last buffer allocated in the RX ring. --SAW */
1764 if (sp->last_rxf == sp->rx_ringp[entry]) {
1765 /* Postpone the packet. It'll be reaped at an interrupt when this
1766 packet is no longer the last packet in the ring. */
1767 if (netif_msg_rx_err(sp))
1768 printk(KERN_DEBUG "%s: RX packet postponed!\n",
1769 dev->name);
1770 sp->rx_ring_state |= RrPostponed;
1771 break;
1772 }
1773
1774 if (netif_msg_rx_status(sp))
1775 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1776 pkt_len);
1777 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1778 if (status & RxErrTooBig)
1779 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1780 "status %8.8x!\n", dev->name, status);
1781 else if (! (status & RxOK)) {
1782 /* There was a fatal error. This *should* be impossible. */
1783 sp->stats.rx_errors++;
1784 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1785 "status %8.8x.\n",
1786 dev->name, status);
1787 }
1788 } else {
1789 struct sk_buff *skb;
1790
1791 /* Check if the packet is long enough to just accept without
1792 copying to a properly sized skbuff. */
1793 if (pkt_len < rx_copybreak
1794 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
1795 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1796 /* 'skb_put()' points to the start of sk_buff data area. */
1797 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1798 sizeof(struct RxFD) + pkt_len,
1799 PCI_DMA_FROMDEVICE);
1800
1801 #if 1 || USE_IP_CSUM
1802 /* Packet is in one chunk -- we can copy + cksum. */
1803 skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
1804 skb_put(skb, pkt_len);
1805 #else
1806 skb_copy_from_linear_data(sp->rx_skbuff[entry],
1807 skb_put(skb, pkt_len),
1808 pkt_len);
1809 #endif
1810 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1811 sizeof(struct RxFD) + pkt_len,
1812 PCI_DMA_FROMDEVICE);
1813 npkts++;
1814 } else {
1815 /* Pass up the already-filled skbuff. */
1816 skb = sp->rx_skbuff[entry];
1817 if (skb == NULL) {
1818 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1819 dev->name);
1820 break;
1821 }
1822 sp->rx_skbuff[entry] = NULL;
1823 skb_put(skb, pkt_len);
1824 npkts++;
1825 sp->rx_ringp[entry] = NULL;
1826 pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1827 PKT_BUF_SZ + sizeof(struct RxFD),
1828 PCI_DMA_FROMDEVICE);
1829 }
1830 skb->protocol = eth_type_trans(skb, dev);
1831 netif_rx(skb);
1832 dev->last_rx = jiffies;
1833 sp->stats.rx_packets++;
1834 sp->stats.rx_bytes += pkt_len;
1835 }
1836 entry = (++sp->cur_rx) % RX_RING_SIZE;
1837 sp->rx_ring_state &= ~RrPostponed;
1838 /* Refill the recently taken buffers.
1839 Do it one-by-one to handle traffic bursts better. */
1840 if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1841 alloc_ok = 0;
1842 }
1843
1844 /* Try hard to refill the recently taken buffers. */
1845 speedo_refill_rx_buffers(dev, 1);
1846
1847 if (npkts)
1848 sp->last_rx_time = jiffies;
1849
1850 return 0;
1851 }
1852
1853 static int
1854 speedo_close(struct net_device *dev)
1855 {
1856 struct speedo_private *sp = netdev_priv(dev);
1857 void __iomem *ioaddr = sp->regs;
1858 int i;
1859
1860 netdevice_stop(dev);
1861 netif_stop_queue(dev);
1862
1863 if (netif_msg_ifdown(sp))
1864 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1865 dev->name, ioread16(ioaddr + SCBStatus));
1866
1867 /* Shut off the media monitoring timer. */
1868 del_timer_sync(&sp->timer);
1869
1870 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1871
1872 /* Shutting down the chip nicely fails to disable flow control. So.. */
1873 iowrite32(PortPartialReset, ioaddr + SCBPort);
1874 ioread32(ioaddr + SCBPort); /* flush posted write */
1875 /*
1876 * The chip requires a 10 microsecond quiet period. Wait here!
1877 */
1878 udelay(10);
1879
1880 free_irq(dev->irq, dev);
1881 speedo_show_state(dev);
1882
1883 /* Free all the skbuffs in the Rx and Tx queues. */
1884 for (i = 0; i < RX_RING_SIZE; i++) {
1885 struct sk_buff *skb = sp->rx_skbuff[i];
1886 sp->rx_skbuff[i] = NULL;
1887 /* Clear the Rx descriptors. */
1888 if (skb) {
1889 pci_unmap_single(sp->pdev,
1890 sp->rx_ring_dma[i],
1891 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1892 dev_kfree_skb(skb);
1893 }
1894 }
1895
1896 for (i = 0; i < TX_RING_SIZE; i++) {
1897 struct sk_buff *skb = sp->tx_skbuff[i];
1898 sp->tx_skbuff[i] = NULL;
1899 /* Clear the Tx descriptors. */
1900 if (skb) {
1901 pci_unmap_single(sp->pdev,
1902 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1903 skb->len, PCI_DMA_TODEVICE);
1904 dev_kfree_skb(skb);
1905 }
1906 }
1907
1908 /* Free multicast setting blocks. */
1909 for (i = 0; sp->mc_setup_head != NULL; i++) {
1910 struct speedo_mc_block *t;
1911 t = sp->mc_setup_head->next;
1912 kfree(sp->mc_setup_head);
1913 sp->mc_setup_head = t;
1914 }
1915 sp->mc_setup_tail = NULL;
1916 if (netif_msg_ifdown(sp))
1917 printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
1918
1919 pci_set_power_state(sp->pdev, PCI_D2);
1920
1921 return 0;
1922 }
1923
1924 /* The Speedo-3 has an especially awkward and unusable method of getting
1925 statistics out of the chip. It takes an unpredictable length of time
1926 for the dump-stats command to complete. To avoid a busy-wait loop we
1927 update the stats with the previous dump results, and then trigger a
1928 new dump.
1929
1930 Oh, and incoming frames are dropped while executing dump-stats!
1931 */
1932 static struct net_device_stats *
1933 speedo_get_stats(struct net_device *dev)
1934 {
1935 struct speedo_private *sp = netdev_priv(dev);
1936 void __iomem *ioaddr = sp->regs;
1937
1938 /* Update only if the previous dump finished. */
1939 if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
1940 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1941 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1942 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1943 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1944 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
1945 sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
1946 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
1947 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
1948 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
1949 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
1950 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
1951 sp->lstats->done_marker = 0x0000;
1952 if (netif_running(dev)) {
1953 unsigned long flags;
1954 /* Take a spinlock to make wait_for_cmd_done and sending the
1955 command atomic. --SAW */
1956 spin_lock_irqsave(&sp->lock, flags);
1957 wait_for_cmd_done(dev, sp);
1958 iowrite8(CUDumpStats, ioaddr + SCBCmd);
1959 spin_unlock_irqrestore(&sp->lock, flags);
1960 }
1961 }
1962 return &sp->stats;
1963 }
1964
1965 static void speedo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1966 {
1967 struct speedo_private *sp = netdev_priv(dev);
1968 strncpy(info->driver, "eepro100", sizeof(info->driver)-1);
1969 strncpy(info->version, version, sizeof(info->version)-1);
1970 if (sp->pdev)
1971 strcpy(info->bus_info, pci_name(sp->pdev));
1972 }
1973
1974 static int speedo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1975 {
1976 struct speedo_private *sp = netdev_priv(dev);
1977 spin_lock_irq(&sp->lock);
1978 mii_ethtool_gset(&sp->mii_if, ecmd);
1979 spin_unlock_irq(&sp->lock);
1980 return 0;
1981 }
1982
1983 static int speedo_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1984 {
1985 struct speedo_private *sp = netdev_priv(dev);
1986 int res;
1987 spin_lock_irq(&sp->lock);
1988 res = mii_ethtool_sset(&sp->mii_if, ecmd);
1989 spin_unlock_irq(&sp->lock);
1990 return res;
1991 }
1992
1993 static int speedo_nway_reset(struct net_device *dev)
1994 {
1995 struct speedo_private *sp = netdev_priv(dev);
1996 return mii_nway_restart(&sp->mii_if);
1997 }
1998
1999 static u32 speedo_get_link(struct net_device *dev)
2000 {
2001 struct speedo_private *sp = netdev_priv(dev);
2002 return mii_link_ok(&sp->mii_if);
2003 }
2004
2005 static u32 speedo_get_msglevel(struct net_device *dev)
2006 {
2007 struct speedo_private *sp = netdev_priv(dev);
2008 return sp->msg_enable;
2009 }
2010
2011 static void speedo_set_msglevel(struct net_device *dev, u32 v)
2012 {
2013 struct speedo_private *sp = netdev_priv(dev);
2014 sp->msg_enable = v;
2015 }
2016
2017 static const struct ethtool_ops ethtool_ops = {
2018 .get_drvinfo = speedo_get_drvinfo,
2019 .get_settings = speedo_get_settings,
2020 .set_settings = speedo_set_settings,
2021 .nway_reset = speedo_nway_reset,
2022 .get_link = speedo_get_link,
2023 .get_msglevel = speedo_get_msglevel,
2024 .set_msglevel = speedo_set_msglevel,
2025 };
2026
2027 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2028 {
2029 struct speedo_private *sp = netdev_priv(dev);
2030 struct mii_ioctl_data *data = if_mii(rq);
2031 int phy = sp->phy[0] & 0x1f;
2032 int saved_acpi;
2033 int t;
2034
2035 switch(cmd) {
2036 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2037 data->phy_id = phy;
2038
2039 case SIOCGMIIREG: /* Read MII PHY register. */
2040 /* FIXME: these operations need to be serialized with MDIO
2041 access from the timeout handler.
2042 They are currently serialized only with MDIO access from the
2043 timer routine. 2000/05/09 SAW */
2044 saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2045 t = del_timer_sync(&sp->timer);
2046 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2047 if (t)
2048 add_timer(&sp->timer); /* may be set to the past --SAW */
2049 pci_set_power_state(sp->pdev, saved_acpi);
2050 return 0;
2051
2052 case SIOCSMIIREG: /* Write MII PHY register. */
2053 if (!capable(CAP_NET_ADMIN))
2054 return -EPERM;
2055 saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2056 t = del_timer_sync(&sp->timer);
2057 mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2058 if (t)
2059 add_timer(&sp->timer); /* may be set to the past --SAW */
2060 pci_set_power_state(sp->pdev, saved_acpi);
2061 return 0;
2062 default:
2063 return -EOPNOTSUPP;
2064 }
2065 }
2066
2067 /* Set or clear the multicast filter for this adaptor.
2068 This is very ugly with Intel chips -- we usually have to execute an
2069 entire configuration command, plus process a multicast command.
2070 This is complicated. We must put a large configuration command and
2071 an arbitrarily-sized multicast command in the transmit list.
2072 To minimize the disruption -- the previous command might have already
2073 loaded the link -- we convert the current command block, normally a Tx
2074 command, into a no-op and link it to the new command.
2075 */
2076 static void set_rx_mode(struct net_device *dev)
2077 {
2078 struct speedo_private *sp = netdev_priv(dev);
2079 void __iomem *ioaddr = sp->regs;
2080 struct descriptor *last_cmd;
2081 char new_rx_mode;
2082 unsigned long flags;
2083 int entry, i;
2084
2085 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2086 new_rx_mode = 3;
2087 } else if ((dev->flags & IFF_ALLMULTI) ||
2088 dev->mc_count > multicast_filter_limit) {
2089 new_rx_mode = 1;
2090 } else
2091 new_rx_mode = 0;
2092
2093 if (netif_msg_rx_status(sp))
2094 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2095 sp->rx_mode, new_rx_mode);
2096
2097 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2098 /* The Tx ring is full -- don't add anything! Hope the mode will be
2099 * set again later. */
2100 sp->rx_mode = -1;
2101 return;
2102 }
2103
2104 if (new_rx_mode != sp->rx_mode) {
2105 u8 *config_cmd_data;
2106
2107 spin_lock_irqsave(&sp->lock, flags);
2108 entry = sp->cur_tx++ % TX_RING_SIZE;
2109 last_cmd = sp->last_cmd;
2110 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2111
2112 sp->tx_skbuff[entry] = NULL; /* Redundant. */
2113 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2114 sp->tx_ring[entry].link =
2115 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2116 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2117 /* Construct a full CmdConfig frame. */
2118 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
2119 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2120 config_cmd_data[4] = rxdmacount;
2121 config_cmd_data[5] = txdmacount + 0x80;
2122 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2123 /* 0x80 doesn't disable FC 0x84 does.
2124 Disable Flow control since we are not ACK-ing any FC interrupts
2125 for now. --Dragan */
2126 config_cmd_data[19] = 0x84;
2127 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2128 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
2129 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2130 config_cmd_data[15] |= 0x80;
2131 config_cmd_data[8] = 0;
2132 }
2133 /* Trigger the command unit resume. */
2134 wait_for_cmd_done(dev, sp);
2135 clear_suspend(last_cmd);
2136 iowrite8(CUResume, ioaddr + SCBCmd);
2137 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2138 netif_stop_queue(dev);
2139 sp->tx_full = 1;
2140 }
2141 spin_unlock_irqrestore(&sp->lock, flags);
2142 }
2143
2144 if (new_rx_mode == 0 && dev->mc_count < 4) {
2145 /* The simple case of 0-3 multicast list entries occurs often, and
2146 fits within one tx_ring[] entry. */
2147 struct dev_mc_list *mclist;
2148 u16 *setup_params, *eaddrs;
2149
2150 spin_lock_irqsave(&sp->lock, flags);
2151 entry = sp->cur_tx++ % TX_RING_SIZE;
2152 last_cmd = sp->last_cmd;
2153 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2154
2155 sp->tx_skbuff[entry] = NULL;
2156 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2157 sp->tx_ring[entry].link =
2158 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2159 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2160 setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
2161 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2162 /* Fill in the multicast addresses. */
2163 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2164 i++, mclist = mclist->next) {
2165 eaddrs = (u16 *)mclist->dmi_addr;
2166 *setup_params++ = *eaddrs++;
2167 *setup_params++ = *eaddrs++;
2168 *setup_params++ = *eaddrs++;
2169 }
2170
2171 wait_for_cmd_done(dev, sp);
2172 clear_suspend(last_cmd);
2173 /* Immediately trigger the command unit resume. */
2174 iowrite8(CUResume, ioaddr + SCBCmd);
2175
2176 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2177 netif_stop_queue(dev);
2178 sp->tx_full = 1;
2179 }
2180 spin_unlock_irqrestore(&sp->lock, flags);
2181 } else if (new_rx_mode == 0) {
2182 struct dev_mc_list *mclist;
2183 u16 *setup_params, *eaddrs;
2184 struct speedo_mc_block *mc_blk;
2185 struct descriptor *mc_setup_frm;
2186 int i;
2187
2188 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2189 GFP_ATOMIC);
2190 if (mc_blk == NULL) {
2191 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2192 dev->name);
2193 sp->rx_mode = -1; /* We failed, try again. */
2194 return;
2195 }
2196 mc_blk->next = NULL;
2197 mc_blk->len = 2 + multicast_filter_limit*6;
2198 mc_blk->frame_dma =
2199 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2200 PCI_DMA_TODEVICE);
2201 mc_setup_frm = &mc_blk->frame;
2202
2203 /* Fill the setup frame. */
2204 if (netif_msg_ifup(sp))
2205 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2206 dev->name, mc_setup_frm);
2207 mc_setup_frm->cmd_status =
2208 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2209 /* Link set below. */
2210 setup_params = (u16 *)&mc_setup_frm->params;
2211 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2212 /* Fill in the multicast addresses. */
2213 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2214 i++, mclist = mclist->next) {
2215 eaddrs = (u16 *)mclist->dmi_addr;
2216 *setup_params++ = *eaddrs++;
2217 *setup_params++ = *eaddrs++;
2218 *setup_params++ = *eaddrs++;
2219 }
2220
2221 /* Disable interrupts while playing with the Tx Cmd list. */
2222 spin_lock_irqsave(&sp->lock, flags);
2223
2224 if (sp->mc_setup_tail)
2225 sp->mc_setup_tail->next = mc_blk;
2226 else
2227 sp->mc_setup_head = mc_blk;
2228 sp->mc_setup_tail = mc_blk;
2229 mc_blk->tx = sp->cur_tx;
2230
2231 entry = sp->cur_tx++ % TX_RING_SIZE;
2232 last_cmd = sp->last_cmd;
2233 sp->last_cmd = mc_setup_frm;
2234
2235 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2236 sp->tx_skbuff[entry] = NULL;
2237 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2238 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2239
2240 /* Set the link in the setup frame. */
2241 mc_setup_frm->link =
2242 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2243
2244 pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
2245 mc_blk->len, PCI_DMA_TODEVICE);
2246
2247 wait_for_cmd_done(dev, sp);
2248 clear_suspend(last_cmd);
2249 /* Immediately trigger the command unit resume. */
2250 iowrite8(CUResume, ioaddr + SCBCmd);
2251
2252 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2253 netif_stop_queue(dev);
2254 sp->tx_full = 1;
2255 }
2256 spin_unlock_irqrestore(&sp->lock, flags);
2257
2258 if (netif_msg_rx_status(sp))
2259 printk(" CmdMCSetup frame length %d in entry %d.\n",
2260 dev->mc_count, entry);
2261 }
2262
2263 sp->rx_mode = new_rx_mode;
2264 }
2265
2266 #ifdef CONFIG_PM
2267 static int eepro100_suspend(struct pci_dev *pdev, pm_message_t state)
2268 {
2269 struct net_device *dev = pci_get_drvdata (pdev);
2270 struct speedo_private *sp = netdev_priv(dev);
2271 void __iomem *ioaddr = sp->regs;
2272
2273 pci_save_state(pdev);
2274
2275 if (!netif_running(dev))
2276 return 0;
2277
2278 del_timer_sync(&sp->timer);
2279
2280 netif_device_detach(dev);
2281 iowrite32(PortPartialReset, ioaddr + SCBPort);
2282
2283 /* XXX call pci_set_power_state ()? */
2284 pci_disable_device(pdev);
2285 pci_set_power_state (pdev, PCI_D3hot);
2286 return 0;
2287 }
2288
2289 static int eepro100_resume(struct pci_dev *pdev)
2290 {
2291 struct net_device *dev = pci_get_drvdata (pdev);
2292 struct speedo_private *sp = netdev_priv(dev);
2293 void __iomem *ioaddr = sp->regs;
2294 int rc;
2295
2296 pci_set_power_state(pdev, PCI_D0);
2297 pci_restore_state(pdev);
2298
2299 rc = pci_enable_device(pdev);
2300 if (rc)
2301 return rc;
2302
2303 pci_set_master(pdev);
2304
2305 if (!netif_running(dev))
2306 return 0;
2307
2308 /* I'm absolutely uncertain if this part of code may work.
2309 The problems are:
2310 - correct hardware reinitialization;
2311 - correct driver behavior between different steps of the
2312 reinitialization;
2313 - serialization with other driver calls.
2314 2000/03/08 SAW */
2315 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
2316 speedo_resume(dev);
2317 netif_device_attach(dev);
2318 sp->rx_mode = -1;
2319 sp->flow_ctrl = sp->partner = 0;
2320 set_rx_mode(dev);
2321 sp->timer.expires = RUN_AT(2*HZ);
2322 add_timer(&sp->timer);
2323 return 0;
2324 }
2325 #endif /* CONFIG_PM */
2326
2327 static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2328 {
2329 struct net_device *dev = pci_get_drvdata (pdev);
2330 struct speedo_private *sp = netdev_priv(dev);
2331
2332 unregister_netdev(dev);
2333
2334 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2335 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2336
2337 pci_iounmap(pdev, sp->regs);
2338 pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2339 + sizeof(struct speedo_stats),
2340 sp->tx_ring, sp->tx_ring_dma);
2341 pci_disable_device(pdev);
2342 free_netdev(dev);
2343 }
2344
2345 static struct pci_device_id eepro100_pci_tbl[] = {
2346 { PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
2347 { PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
2348 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2349 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2350 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2351 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2352 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2353 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2354 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2355 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2356 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2357 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2358 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2359 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2360 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2361 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2362 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2363 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2364 { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2365 { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2366 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2367 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2368 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2369 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2370 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2371 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2372 { 0,}
2373 };
2374 MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2375
2376 static struct pci_driver eepro100_driver = {
2377 .name = "eepro100",
2378 .id_table = eepro100_pci_tbl,
2379 .probe = eepro100_init_one,
2380 .remove = __devexit_p(eepro100_remove_one),
2381 #ifdef CONFIG_PM
2382 .suspend = eepro100_suspend,
2383 .resume = eepro100_resume,
2384 #endif /* CONFIG_PM */
2385 };
2386
2387 static int __init eepro100_init_module(void)
2388 {
2389 #ifdef MODULE
2390 printk(version);
2391 #endif
2392 return pci_register_driver(&eepro100_driver);
2393 }
2394
2395 static void __exit eepro100_cleanup_module(void)
2396 {
2397 pci_unregister_driver(&eepro100_driver);
2398 }
2399
2400 module_init(eepro100_init_module);
2401 module_exit(eepro100_cleanup_module);
2402
2403 /*
2404 * Local variables:
2405 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
2406 * c-indent-level: 4
2407 * c-basic-offset: 4
2408 * tab-width: 4
2409 * End:
2410 */
This page took 0.137395 seconds and 6 git commands to generate.