Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */ |
2 | /* | |
3 | Written 1999-2000 by Donald Becker. | |
4 | ||
5 | This software may be used and distributed according to the terms of | |
6 | the GNU General Public License (GPL), incorporated herein by reference. | |
7 | Drivers based on or derived from this code fall under the GPL and must | |
8 | retain the authorship, copyright and license notice. This file is not | |
9 | a complete program and may only be used when the entire operating | |
10 | system is licensed under the GPL. | |
11 | ||
12 | The author may be reached as becker@scyld.com, or C/O | |
13 | Scyld Computing Corporation | |
14 | 410 Severn Ave., Suite 210 | |
15 | Annapolis MD 21403 | |
16 | ||
17 | Support and updates available at | |
18 | http://www.scyld.com/network/sundance.html | |
03a8c661 | 19 | [link no longer provides useful info -jgarzik] |
e714d99c PDM |
20 | Archives of the mailing list are still available at |
21 | http://www.beowulf.org/pipermail/netdrivers/ | |
1da177e4 | 22 | |
1da177e4 LT |
23 | */ |
24 | ||
25 | #define DRV_NAME "sundance" | |
d5b20697 AG |
26 | #define DRV_VERSION "1.2" |
27 | #define DRV_RELDATE "11-Sep-2006" | |
1da177e4 LT |
28 | |
29 | ||
30 | /* The user-configurable values. | |
31 | These may be modified when a driver module is loaded.*/ | |
32 | static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ | |
33 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). | |
34 | Typical is a 64 element hash table based on the Ethernet CRC. */ | |
f71e1309 | 35 | static const int multicast_filter_limit = 32; |
1da177e4 LT |
36 | |
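/* Illustrative sketch only (not part of the original driver): the
   "64 element hash table based on the Ethernet CRC" mentioned above is
   usually indexed by six bits of the CRC of the destination address.
   A hypothetical helper, assuming ether_crc() from <linux/crc32.h>: */
#if 0	/* example only */
static void example_set_mc_hash_bit(u16 mc_filter[4], const u8 *addr)
{
	int bit = ether_crc(ETH_ALEN, addr) & 0x3f;	/* bit index 0..63 */
	mc_filter[bit >> 4] |= 1 << (bit & 0x0f);	/* four 16-bit filter words */
}
#endif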
37 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. | |
38 | Setting to > 1518 effectively disables this feature. | |
39 | This chip can receive into offset buffers, so the Alpha does not | |
40 | need a copy-align. */ | |
41 | static int rx_copybreak; | |
42 | static int flowctrl=1; | |
43 | ||
44 | /* media[] specifies the media type the NIC operates at. | |
45 | autosense Autosensing active media. | |
46 | 10mbps_hd 10Mbps half duplex. | |
47 | 10mbps_fd 10Mbps full duplex. | |
48 | 100mbps_hd 100Mbps half duplex. | |
49 | 100mbps_fd 100Mbps full duplex. | |
50 | 0 Autosensing active media. | |
51 | 1 10Mbps half duplex. | |
52 | 2 10Mbps full duplex. | |
53 | 3 100Mbps half duplex. | |
54 | 4 100Mbps full duplex. | |
55 | */ | |
56 | #define MAX_UNITS 8 | |
57 | static char *media[MAX_UNITS]; | |
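/* Hypothetical usage example (parameters are per-card, comma separated;
   "sundance" is DRV_NAME above):
	modprobe sundance media=100mbps_fd,autosense flowctrl=1 debug=2
   forces the first card to 100Mbps full duplex and lets a second card
   autonegotiate. */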
58 | ||
59 | ||
60 | /* Operational parameters that are set at compile time. */ | |
61 | ||
62 | /* Keep the ring sizes a power of two for compile efficiency. | |
63 | The compiler will convert <unsigned>'%'<2^N> into a bit mask. | |
64 | Making the Tx ring too large decreases the effectiveness of channel | |
65 | bonding and packet priority, and more than 128 requires modifying the | |
66 | Tx error recovery. | |
67 | Large receive rings merely waste memory. */ | |
68 | #define TX_RING_SIZE 32 | |
69 | #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */ | |
70 | #define RX_RING_SIZE 64 | |
71 | #define RX_BUDGET 32 | |
72 | #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc) | |
73 | #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc) | |
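/* For reference: struct netdev_desc (defined below) is 16 bytes (two
   __le32 fields plus one 8-byte fragment), so TX_TOTAL_SIZE works out to
   32 * 16 = 512 bytes and RX_TOTAL_SIZE to 64 * 16 = 1024 bytes. */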
74 | ||
75 | /* Operational parameters that usually are not changed. */ | |
76 | /* Time in jiffies before concluding the transmitter is hung. */ | |
77 | #define TX_TIMEOUT (4*HZ) | |
78 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ | |
79 | ||
80 | /* Include files, designed to support most kernel versions 2.0.0 and later. */ | |
81 | #include <linux/module.h> | |
82 | #include <linux/kernel.h> | |
83 | #include <linux/string.h> | |
84 | #include <linux/timer.h> | |
85 | #include <linux/errno.h> | |
86 | #include <linux/ioport.h> | |
1da177e4 LT |
87 | #include <linux/interrupt.h> |
88 | #include <linux/pci.h> | |
89 | #include <linux/netdevice.h> | |
90 | #include <linux/etherdevice.h> | |
91 | #include <linux/skbuff.h> | |
92 | #include <linux/init.h> | |
93 | #include <linux/bitops.h> | |
94 | #include <asm/uaccess.h> | |
95 | #include <asm/processor.h> /* Processor type for cache alignment. */ | |
96 | #include <asm/io.h> | |
97 | #include <linux/delay.h> | |
98 | #include <linux/spinlock.h> | |
99 | #ifndef _COMPAT_WITH_OLD_KERNEL | |
100 | #include <linux/crc32.h> | |
101 | #include <linux/ethtool.h> | |
102 | #include <linux/mii.h> | |
103 | #else | |
104 | #include "crc32.h" | |
105 | #include "ethtool.h" | |
106 | #include "mii.h" | |
107 | #include "compat.h" | |
108 | #endif | |
109 | ||
110 | /* These identify the driver base version and may not be removed. */ | |
3af0fe39 SH |
111 | static const char version[] __devinitconst = |
112 | KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE | |
113 | " Written by Donald Becker\n"; | |
1da177e4 LT |
114 | |
115 | MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); | |
116 | MODULE_DESCRIPTION("Sundance Alta Ethernet driver"); | |
117 | MODULE_LICENSE("GPL"); | |
118 | ||
119 | module_param(debug, int, 0); | |
120 | module_param(rx_copybreak, int, 0); | |
121 | module_param_array(media, charp, NULL, 0); | |
122 | module_param(flowctrl, int, 0); | |
123 | MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)"); | |
124 | MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames"); | |
125 | MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]"); | |
126 | ||
127 | /* | |
128 | Theory of Operation | |
129 | ||
130 | I. Board Compatibility | |
131 | ||
132 | This driver is designed for the Sundance Technologies "Alta" ST201 chip. | |
133 | ||
134 | II. Board-specific settings | |
135 | ||
136 | III. Driver operation | |
137 | ||
138 | IIIa. Ring buffers | |
139 | ||
140 | This driver uses two statically allocated fixed-size descriptor lists | |
141 | formed into rings by a branch from the final descriptor to the beginning of | |
142 | the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. | |
143 | Some chips explicitly use only 2^N sized rings, while others use a | |
144 | 'next descriptor' pointer that the driver forms into rings. | |
145 | ||
146 | IIIb/c. Transmit/Receive Structure | |
147 | ||
148 | This driver uses a zero-copy receive and transmit scheme. | |
149 | The driver allocates full frame size skbuffs for the Rx ring buffers at | |
150 | open() time and passes the skb->data field to the chip as receive data | |
151 | buffers. When an incoming frame is less than RX_COPYBREAK bytes long, | |
152 | a fresh skbuff is allocated and the frame is copied to the new skbuff. | |
153 | When the incoming frame is larger, the skbuff is passed directly up the | |
154 | protocol stack. Buffers consumed this way are replaced by newly allocated | |
155 | skbuffs in a later phase of receives. | |
156 | ||
157 | The RX_COPYBREAK value is chosen to trade-off the memory wasted by | |
158 | using a full-sized skbuff for small frames vs. the copying costs of larger | |
159 | frames. New boards are typically used in generously configured machines | |
160 | and the underfilled buffers have negligible impact compared to the benefit of | |
161 | a single allocation size, so the default value of zero results in never | |
162 | copying packets. When copying is done, the cost is usually mitigated by using | |
163 | a combined copy/checksum routine. Copying also preloads the cache, which is | |
164 | most useful with small frames. | |
165 | ||
166 | A subtle aspect of the operation is that the IP header at offset 14 in an | |
167 | ethernet frame isn't longword aligned for further processing. | |
168 | Unaligned buffers are permitted by the Sundance hardware, so | |
169 | frames are received into the skbuff at an offset of "+2", 16-byte aligning | |
170 | the IP header. | |
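In outline (illustrative only; rx_poll() further below is the authoritative
code), the copybreak decision on each received frame looks roughly like:

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);
		... sync the DMA buffer, skb_copy_to_linear_data(), skb_put() ...
	} else {
		... pci_unmap_single() the buffer and pass the original skbuff up ...
	}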
171 | ||
172 | IIId. Synchronization | |
173 | ||
174 | The driver runs as two independent, single-threaded flows of control. One | |
175 | is the send-packet routine, which enforces single-threaded use by the | |
176 | dev->tbusy flag. The other thread is the interrupt handler, which is single | |
177 | threaded by the hardware and interrupt handling software. | |
178 | ||
179 | The send packet thread has partial control over the Tx ring and 'dev->tbusy' | |
180 | flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next | |
181 | queue slot is empty, it clears the tbusy flag when finished; otherwise it sets |
182 | the 'lp->tx_full' flag. | |
183 | ||
184 | The interrupt handler has exclusive control over the Rx ring and records stats | |
185 | from the Tx ring. After reaping the stats, it marks the Tx queue entry as | |
186 | empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it | |
187 | clears both the tx_full and tbusy flags. | |
188 | ||
189 | IV. Notes | |
190 | ||
191 | IVb. References | |
192 | ||
193 | The Sundance ST201 datasheet, preliminary version. | |
b71b95ef PDM |
194 | The Kendin KS8723 datasheet, preliminary version. |
195 | The ICplus IP100 datasheet, preliminary version. | |
196 | http://www.scyld.com/expert/100mbps.html | |
197 | http://www.scyld.com/expert/NWay.html | |
1da177e4 LT |
198 | |
199 | IVc. Errata | |
200 | ||
201 | */ | |
202 | ||
203 | /* Work-around for Kendin chip bugs. */ | |
204 | #ifndef CONFIG_SUNDANCE_MMIO | |
205 | #define USE_IO_OPS 1 | |
206 | #endif | |
207 | ||
a3aa1884 | 208 | static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = { |
46009c8b JG |
209 | { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 }, |
210 | { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 }, | |
211 | { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 }, | |
212 | { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 }, | |
213 | { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, | |
214 | { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, | |
215 | { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, | |
216 | { } | |
1da177e4 LT |
217 | }; |
218 | MODULE_DEVICE_TABLE(pci, sundance_pci_tbl); | |
219 | ||
220 | enum { | |
221 | netdev_io_size = 128 | |
222 | }; | |
223 | ||
224 | struct pci_id_info { | |
225 | const char *name; | |
226 | }; | |
46009c8b | 227 | static const struct pci_id_info pci_id_tbl[] __devinitdata = { |
1da177e4 LT |
228 | {"D-Link DFE-550TX FAST Ethernet Adapter"}, |
229 | {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, | |
230 | {"D-Link DFE-580TX 4 port Server Adapter"}, | |
231 | {"D-Link DFE-530TXS FAST Ethernet Adapter"}, | |
232 | {"D-Link DL10050-based FAST Ethernet Adapter"}, | |
233 | {"Sundance Technology Alta"}, | |
1668b19f | 234 | {"IC Plus Corporation IP100A FAST Ethernet Adapter"}, |
46009c8b | 235 | { } /* terminate list. */ |
1da177e4 LT |
236 | }; |
237 | ||
238 | /* This driver was written to use PCI memory space, however x86-oriented | |
239 | hardware often uses I/O space accesses. */ | |
240 | ||
241 | /* Offsets to the device registers. | |
242 | Unlike software-only systems, device drivers interact with complex hardware. | |
243 | It's not useful to define symbolic names for every register bit in the | |
244 | device. Such names can only partially document the semantics and they |
245 | make the driver longer and more difficult to read. |
246 | In general, only the important configuration values or bits changed | |
247 | multiple times should be defined symbolically. | |
248 | */ | |
249 | enum alta_offsets { | |
250 | DMACtrl = 0x00, | |
251 | TxListPtr = 0x04, | |
252 | TxDMABurstThresh = 0x08, | |
253 | TxDMAUrgentThresh = 0x09, | |
254 | TxDMAPollPeriod = 0x0a, | |
255 | RxDMAStatus = 0x0c, | |
256 | RxListPtr = 0x10, | |
257 | DebugCtrl0 = 0x1a, | |
258 | DebugCtrl1 = 0x1c, | |
259 | RxDMABurstThresh = 0x14, | |
260 | RxDMAUrgentThresh = 0x15, | |
261 | RxDMAPollPeriod = 0x16, | |
262 | LEDCtrl = 0x1a, | |
263 | ASICCtrl = 0x30, | |
264 | EEData = 0x34, | |
265 | EECtrl = 0x36, | |
1da177e4 LT |
266 | FlashAddr = 0x40, |
267 | FlashData = 0x44, | |
268 | TxStatus = 0x46, | |
269 | TxFrameId = 0x47, | |
270 | DownCounter = 0x18, | |
271 | IntrClear = 0x4a, | |
272 | IntrEnable = 0x4c, | |
273 | IntrStatus = 0x4e, | |
274 | MACCtrl0 = 0x50, | |
275 | MACCtrl1 = 0x52, | |
276 | StationAddr = 0x54, | |
277 | MaxFrameSize = 0x5A, | |
278 | RxMode = 0x5c, | |
279 | MIICtrl = 0x5e, | |
280 | MulticastFilter0 = 0x60, | |
281 | MulticastFilter1 = 0x64, | |
282 | RxOctetsLow = 0x68, | |
283 | RxOctetsHigh = 0x6a, | |
284 | TxOctetsLow = 0x6c, | |
285 | TxOctetsHigh = 0x6e, | |
286 | TxFramesOK = 0x70, | |
287 | RxFramesOK = 0x72, | |
288 | StatsCarrierError = 0x74, | |
289 | StatsLateColl = 0x75, | |
290 | StatsMultiColl = 0x76, | |
291 | StatsOneColl = 0x77, | |
292 | StatsTxDefer = 0x78, | |
293 | RxMissed = 0x79, | |
294 | StatsTxXSDefer = 0x7a, | |
295 | StatsTxAbort = 0x7b, | |
296 | StatsBcastTx = 0x7c, | |
297 | StatsBcastRx = 0x7d, | |
298 | StatsMcastTx = 0x7e, | |
299 | StatsMcastRx = 0x7f, | |
300 | /* Aliased and bogus values! */ | |
301 | RxStatus = 0x0c, | |
302 | }; | |
303 | enum ASICCtrl_HiWord_bit { | |
304 | GlobalReset = 0x0001, | |
305 | RxReset = 0x0002, | |
306 | TxReset = 0x0004, | |
307 | DMAReset = 0x0008, | |
308 | FIFOReset = 0x0010, | |
309 | NetworkReset = 0x0020, | |
310 | HostReset = 0x0040, | |
311 | ResetBusy = 0x0400, | |
312 | }; | |
313 | ||
314 | /* Bits in the interrupt status/mask registers. */ | |
315 | enum intr_status_bits { | |
316 | IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008, | |
317 | IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020, | |
318 | IntrDrvRqst=0x0040, | |
319 | StatsMax=0x0080, LinkChange=0x0100, | |
320 | IntrTxDMADone=0x0200, IntrRxDMADone=0x0400, | |
321 | }; | |
322 | ||
323 | /* Bits in the RxMode register. */ | |
324 | enum rx_mode_bits { | |
325 | AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08, | |
326 | AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01, | |
327 | }; | |
328 | /* Bits in MACCtrl. */ | |
329 | enum mac_ctrl0_bits { | |
330 | EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40, | |
331 | EnbFlowCtrl=0x100, EnbPassRxCRC=0x200, | |
332 | }; | |
333 | enum mac_ctrl1_bits { | |
334 | StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080, | |
335 | TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400, | |
336 | RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000, | |
337 | }; | |
338 | ||
339 | /* The Rx and Tx buffer descriptors. */ | |
340 | /* Note that using only 32 bit fields simplifies conversion to big-endian | |
341 | architectures. */ | |
342 | struct netdev_desc { | |
14c9d9b0 AV |
343 | __le32 next_desc; |
344 | __le32 status; | |
345 | struct desc_frag { __le32 addr, length; } frag[1]; | |
1da177e4 LT |
346 | }; |
347 | ||
348 | /* Bits in netdev_desc.status */ | |
349 | enum desc_status_bits { | |
350 | DescOwn=0x8000, | |
351 | DescEndPacket=0x4000, | |
352 | DescEndRing=0x2000, | |
353 | LastFrag=0x80000000, | |
354 | DescIntrOnTx=0x8000, | |
355 | DescIntrOnDMADone=0x80000000, | |
356 | DisableAlign = 0x00000001, | |
357 | }; | |
358 | ||
359 | #define PRIV_ALIGN 15 /* Required alignment mask */ | |
360 | /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment | |
361 | within the structure. */ | |
362 | #define MII_CNT 4 | |
363 | struct netdev_private { | |
364 | /* Descriptor rings first for alignment. */ | |
365 | struct netdev_desc *rx_ring; | |
366 | struct netdev_desc *tx_ring; | |
367 | struct sk_buff* rx_skbuff[RX_RING_SIZE]; | |
368 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; | |
369 | dma_addr_t tx_ring_dma; | |
370 | dma_addr_t rx_ring_dma; | |
1da177e4 LT |
371 | struct timer_list timer; /* Media monitoring timer. */ |
372 | /* Frequently used values: keep some adjacent for cache effect. */ | |
373 | spinlock_t lock; | |
374 | spinlock_t rx_lock; /* Group with Tx control cache line. */ | |
375 | int msg_enable; | |
376 | int chip_id; | |
377 | unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ | |
378 | unsigned int rx_buf_sz; /* Based on MTU+slack. */ | |
379 | struct netdev_desc *last_tx; /* Last Tx descriptor used. */ | |
380 | unsigned int cur_tx, dirty_tx; | |
381 | /* These values keep track of the transceiver/media in use. */ |
382 | unsigned int flowctrl:1; | |
383 | unsigned int default_port:4; /* Last dev->if_port value. */ | |
384 | unsigned int an_enable:1; | |
385 | unsigned int speed; | |
386 | struct tasklet_struct rx_tasklet; | |
387 | struct tasklet_struct tx_tasklet; | |
388 | int budget; | |
389 | int cur_task; | |
390 | /* Multicast and receive mode. */ | |
391 | spinlock_t mcastlock; /* SMP lock multicast updates. */ | |
392 | u16 mcast_filter[4]; | |
393 | /* MII transceiver section. */ | |
394 | struct mii_if_info mii_if; | |
395 | int mii_preamble_required; | |
396 | unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */ | |
397 | struct pci_dev *pci_dev; | |
398 | void __iomem *base; | |
1da177e4 LT |
399 | }; |
400 | ||
401 | /* The station address location in the EEPROM. */ | |
402 | #define EEPROM_SA_OFFSET 0x10 | |
403 | #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \ | |
404 | IntrDrvRqst | IntrTxDone | StatsMax | \ | |
405 | LinkChange) | |
406 | ||
407 | static int change_mtu(struct net_device *dev, int new_mtu); | |
408 | static int eeprom_read(void __iomem *ioaddr, int location); | |
409 | static int mdio_read(struct net_device *dev, int phy_id, int location); | |
410 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value); | |
50500155 | 411 | static int mdio_wait_link(struct net_device *dev, int wait); |
1da177e4 LT |
412 | static int netdev_open(struct net_device *dev); |
413 | static void check_duplex(struct net_device *dev); | |
414 | static void netdev_timer(unsigned long data); | |
415 | static void tx_timeout(struct net_device *dev); | |
416 | static void init_ring(struct net_device *dev); | |
61357325 | 417 | static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); |
1da177e4 | 418 | static int reset_tx (struct net_device *dev); |
7d12e780 | 419 | static irqreturn_t intr_handler(int irq, void *dev_instance); |
1da177e4 LT |
420 | static void rx_poll(unsigned long data); |
421 | static void tx_poll(unsigned long data); | |
422 | static void refill_rx (struct net_device *dev); | |
423 | static void netdev_error(struct net_device *dev, int intr_status); |
425 | static void set_rx_mode(struct net_device *dev); | |
426 | static int __set_mac_addr(struct net_device *dev); | |
427 | static struct net_device_stats *get_stats(struct net_device *dev); | |
428 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | |
429 | static int netdev_close(struct net_device *dev); | |
7282d491 | 430 | static const struct ethtool_ops ethtool_ops; |
1da177e4 | 431 | |
b71b95ef PDM |
432 | static void sundance_reset(struct net_device *dev, unsigned long reset_cmd) |
433 | { | |
434 | struct netdev_private *np = netdev_priv(dev); | |
435 | void __iomem *ioaddr = np->base + ASICCtrl; | |
436 | int countdown; | |
437 | ||
438 | /* ST201 documentation states ASICCtrl is a 32bit register */ | |
439 | iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr); | |
440 | /* ST201 documentation states reset can take up to 1 ms */ | |
441 | countdown = 10 + 1; | |
442 | while (ioread32 (ioaddr) & (ResetBusy << 16)) { | |
443 | if (--countdown == 0) { | |
444 | printk(KERN_WARNING "%s : reset not completed !!\n", dev->name); | |
445 | break; | |
446 | } | |
447 | udelay(100); | |
448 | } | |
449 | } | |
450 | ||
633a277e SH |
451 | static const struct net_device_ops netdev_ops = { |
452 | .ndo_open = netdev_open, | |
453 | .ndo_stop = netdev_close, | |
454 | .ndo_start_xmit = start_tx, | |
455 | .ndo_get_stats = get_stats, | |
456 | .ndo_set_multicast_list = set_rx_mode, | |
457 | .ndo_do_ioctl = netdev_ioctl, | |
458 | .ndo_tx_timeout = tx_timeout, | |
459 | .ndo_change_mtu = change_mtu, | |
460 | .ndo_set_mac_address = eth_mac_addr, | |
461 | .ndo_validate_addr = eth_validate_addr, | |
462 | }; | |
463 | ||
1da177e4 LT |
464 | static int __devinit sundance_probe1 (struct pci_dev *pdev, |
465 | const struct pci_device_id *ent) | |
466 | { | |
467 | struct net_device *dev; | |
468 | struct netdev_private *np; | |
469 | static int card_idx; | |
470 | int chip_idx = ent->driver_data; | |
471 | int irq; | |
472 | int i; | |
473 | void __iomem *ioaddr; | |
474 | u16 mii_ctl; | |
475 | void *ring_space; | |
476 | dma_addr_t ring_dma; | |
477 | #ifdef USE_IO_OPS | |
478 | int bar = 0; | |
479 | #else | |
480 | int bar = 1; | |
481 | #endif | |
ac1d49f8 | 482 | int phy, phy_end, phy_idx = 0; |
1da177e4 LT |
483 | |
484 | /* when built into the kernel, we only print version if device is found */ | |
485 | #ifndef MODULE | |
486 | static int printed_version; | |
487 | if (!printed_version++) | |
488 | printk(version); | |
489 | #endif | |
490 | ||
491 | if (pci_enable_device(pdev)) | |
492 | return -EIO; | |
493 | pci_set_master(pdev); | |
494 | ||
495 | irq = pdev->irq; | |
496 | ||
497 | dev = alloc_etherdev(sizeof(*np)); | |
498 | if (!dev) | |
499 | return -ENOMEM; | |
1da177e4 LT |
500 | SET_NETDEV_DEV(dev, &pdev->dev); |
501 | ||
502 | if (pci_request_regions(pdev, DRV_NAME)) | |
503 | goto err_out_netdev; | |
504 | ||
505 | ioaddr = pci_iomap(pdev, bar, netdev_io_size); | |
506 | if (!ioaddr) | |
507 | goto err_out_res; | |
508 | ||
509 | for (i = 0; i < 3; i++) | |
14c9d9b0 AV |
510 | ((__le16 *)dev->dev_addr)[i] = |
511 | cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); | |
30d60a82 | 512 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
1da177e4 LT |
513 | |
514 | dev->base_addr = (unsigned long)ioaddr; | |
515 | dev->irq = irq; | |
516 | ||
517 | np = netdev_priv(dev); | |
518 | np->base = ioaddr; | |
519 | np->pci_dev = pdev; | |
520 | np->chip_id = chip_idx; | |
521 | np->msg_enable = (1 << debug) - 1; | |
522 | spin_lock_init(&np->lock); | |
523 | tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev); | |
524 | tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev); | |
525 | ||
526 | ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); | |
527 | if (!ring_space) | |
528 | goto err_out_cleardev; | |
529 | np->tx_ring = (struct netdev_desc *)ring_space; | |
530 | np->tx_ring_dma = ring_dma; | |
531 | ||
532 | ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); | |
533 | if (!ring_space) | |
534 | goto err_out_unmap_tx; | |
535 | np->rx_ring = (struct netdev_desc *)ring_space; | |
536 | np->rx_ring_dma = ring_dma; | |
537 | ||
538 | np->mii_if.dev = dev; | |
539 | np->mii_if.mdio_read = mdio_read; | |
540 | np->mii_if.mdio_write = mdio_write; | |
541 | np->mii_if.phy_id_mask = 0x1f; | |
542 | np->mii_if.reg_num_mask = 0x1f; | |
543 | ||
544 | /* The chip-specific entries in the device structure. */ | |
633a277e | 545 | dev->netdev_ops = &netdev_ops; |
1da177e4 | 546 | SET_ETHTOOL_OPS(dev, ðtool_ops); |
1da177e4 | 547 | dev->watchdog_timeo = TX_TIMEOUT; |
633a277e | 548 | |
1da177e4 LT |
549 | pci_set_drvdata(pdev, dev); |
550 | ||
1da177e4 LT |
551 | i = register_netdev(dev); |
552 | if (i) | |
553 | goto err_out_unmap_rx; | |
554 | ||
e174961c | 555 | printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n", |
0795af57 | 556 | dev->name, pci_id_tbl[chip_idx].name, ioaddr, |
e174961c | 557 | dev->dev_addr, irq); |
1da177e4 | 558 | |
67ec2f80 JL |
559 | np->phys[0] = 1; /* Default setting */ |
560 | np->mii_preamble_required++; | |
ac1d49f8 | 561 | |
0d615ec2 ACM |
562 | /* |
563 | * It seems some PHYs don't deal well with address 0 being accessed
ac1d49f8 | 564 | * first |
0d615ec2 | 565 | */ |
ac1d49f8 JG |
566 | if (sundance_pci_tbl[np->chip_id].device == 0x0200) { |
567 | phy = 0; | |
568 | phy_end = 31; | |
569 | } else { | |
570 | phy = 1; | |
571 | phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */ | |
572 | } | |
573 | for (; phy <= phy_end && phy_idx < MII_CNT; phy++) { | |
b06c093e | 574 | int phyx = phy & 0x1f; |
0d615ec2 | 575 | int mii_status = mdio_read(dev, phyx, MII_BMSR); |
67ec2f80 | 576 | if (mii_status != 0xffff && mii_status != 0x0000) { |
b06c093e JL |
577 | np->phys[phy_idx++] = phyx; |
578 | np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); | |
67ec2f80 JL |
579 | if ((mii_status & 0x0040) == 0) |
580 | np->mii_preamble_required++; | |
581 | printk(KERN_INFO "%s: MII PHY found at address %d, status " | |
582 | "0x%4.4x advertising %4.4x.\n", | |
b06c093e | 583 | dev->name, phyx, mii_status, np->mii_if.advertising); |
1da177e4 | 584 | } |
67ec2f80 JL |
585 | } |
586 | np->mii_preamble_required--; | |
1da177e4 | 587 | |
67ec2f80 JL |
588 | if (phy_idx == 0) { |
589 | printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n", | |
590 | dev->name, ioread32(ioaddr + ASICCtrl)); | |
591 | goto err_out_unregister; | |
1da177e4 LT |
592 | } |
593 | ||
67ec2f80 JL |
594 | np->mii_if.phy_id = np->phys[0]; |
595 | ||
1da177e4 LT |
596 | /* Parse override configuration */ |
597 | np->an_enable = 1; | |
598 | if (card_idx < MAX_UNITS) { | |
599 | if (media[card_idx] != NULL) { | |
600 | np->an_enable = 0; | |
601 | if (strcmp (media[card_idx], "100mbps_fd") == 0 || | |
602 | strcmp (media[card_idx], "4") == 0) { | |
603 | np->speed = 100; | |
604 | np->mii_if.full_duplex = 1; | |
8e95a202 JP |
605 | } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || |
606 | strcmp (media[card_idx], "3") == 0) { | |
1da177e4 LT |
607 | np->speed = 100; |
608 | np->mii_if.full_duplex = 0; | |
609 | } else if (strcmp (media[card_idx], "10mbps_fd") == 0 || | |
610 | strcmp (media[card_idx], "2") == 0) { | |
611 | np->speed = 10; | |
612 | np->mii_if.full_duplex = 1; | |
613 | } else if (strcmp (media[card_idx], "10mbps_hd") == 0 || | |
614 | strcmp (media[card_idx], "1") == 0) { | |
615 | np->speed = 10; | |
616 | np->mii_if.full_duplex = 0; | |
617 | } else { | |
618 | np->an_enable = 1; | |
619 | } | |
620 | } | |
621 | if (flowctrl == 1) | |
622 | np->flowctrl = 1; | |
623 | } | |
624 | ||
625 | /* Fibre PHY? */ | |
626 | if (ioread32 (ioaddr + ASICCtrl) & 0x80) { | |
627 | /* Default 100Mbps Full */ | |
628 | if (np->an_enable) { | |
629 | np->speed = 100; | |
630 | np->mii_if.full_duplex = 1; | |
631 | np->an_enable = 0; | |
632 | } | |
633 | } | |
634 | /* Reset PHY */ | |
635 | mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET); | |
636 | mdelay (300); | |
637 | /* If flow control is enabled, we need to advertise it. */ |
638 | if (np->flowctrl) | |
639 | mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400); | |
640 | mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); | |
641 | /* Force media type */ | |
642 | if (!np->an_enable) { | |
643 | mii_ctl = 0; | |
644 | mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0; | |
645 | mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0; | |
646 | mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl); | |
647 | printk (KERN_INFO "Override speed=%d, %s duplex\n", | |
648 | np->speed, np->mii_if.full_duplex ? "Full" : "Half"); | |
649 | ||
650 | } | |
651 | ||
652 | /* Perhaps move the reset here? */ | |
653 | /* Reset the chip to erase previous misconfiguration. */ | |
654 | if (netif_msg_hw(np)) | |
655 | printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl)); | |
e714d99c | 656 | sundance_reset(dev, 0x00ff << 16); |
1da177e4 LT |
657 | if (netif_msg_hw(np)) |
658 | printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl)); | |
659 | ||
660 | card_idx++; | |
661 | return 0; | |
662 | ||
663 | err_out_unregister: | |
664 | unregister_netdev(dev); | |
665 | err_out_unmap_rx: | |
666 | pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); | |
667 | err_out_unmap_tx: | |
668 | pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); | |
669 | err_out_cleardev: | |
670 | pci_set_drvdata(pdev, NULL); | |
671 | pci_iounmap(pdev, ioaddr); | |
672 | err_out_res: | |
673 | pci_release_regions(pdev); | |
674 | err_out_netdev: | |
675 | free_netdev (dev); | |
676 | return -ENODEV; | |
677 | } | |
678 | ||
679 | static int change_mtu(struct net_device *dev, int new_mtu) | |
680 | { | |
681 | if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */ | |
682 | return -EINVAL; | |
683 | if (netif_running(dev)) | |
684 | return -EBUSY; | |
685 | dev->mtu = new_mtu; | |
686 | return 0; | |
687 | } | |
688 | ||
689 | #define eeprom_delay(ee_addr) ioread32(ee_addr) | |
690 | /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */ | |
691 | static int __devinit eeprom_read(void __iomem *ioaddr, int location) | |
692 | { | |
693 | int boguscnt = 10000; /* Typical 1900 ticks. */ | |
694 | iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl); | |
695 | do { | |
696 | eeprom_delay(ioaddr + EECtrl); | |
697 | if (! (ioread16(ioaddr + EECtrl) & 0x8000)) { | |
698 | return ioread16(ioaddr + EEData); | |
699 | } | |
700 | } while (--boguscnt > 0); | |
701 | return 0; | |
702 | } | |
703 | ||
704 | /* MII transceiver control section. | |
705 | Read and write the MII registers using software-generated serial | |
706 | MDIO protocol. See the MII specifications or DP83840A data sheet | |
707 | for details. | |
708 | ||
709 | The maximum data clock rate is 2.5 MHz. The minimum timing is usually |
710 | met by back-to-back 33 MHz PCI cycles. */ |
711 | #define mdio_delay() ioread8(mdio_addr) | |
712 | ||
713 | enum mii_reg_bits { | |
714 | MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004, | |
715 | }; | |
716 | #define MDIO_EnbIn (0) | |
717 | #define MDIO_WRITE0 (MDIO_EnbOutput) | |
718 | #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput) | |
719 | ||
720 | /* Generate the preamble required for initial synchronization and | |
721 | a few older transceivers. */ | |
722 | static void mdio_sync(void __iomem *mdio_addr) | |
723 | { | |
724 | int bits = 32; | |
725 | ||
726 | /* Establish sync by sending at least 32 logic ones. */ | |
727 | while (--bits >= 0) { | |
728 | iowrite8(MDIO_WRITE1, mdio_addr); | |
729 | mdio_delay(); | |
730 | iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr); | |
731 | mdio_delay(); | |
732 | } | |
733 | } | |
734 | ||
735 | static int mdio_read(struct net_device *dev, int phy_id, int location) | |
736 | { | |
737 | struct netdev_private *np = netdev_priv(dev); | |
738 | void __iomem *mdio_addr = np->base + MIICtrl; | |
739 | int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; | |
740 | int i, retval = 0; | |
741 | ||
742 | if (np->mii_preamble_required) | |
743 | mdio_sync(mdio_addr); | |
744 | ||
745 | /* Shift the read command bits out. */ | |
746 | for (i = 15; i >= 0; i--) { | |
747 | int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; | |
748 | ||
749 | iowrite8(dataval, mdio_addr); | |
750 | mdio_delay(); | |
751 | iowrite8(dataval | MDIO_ShiftClk, mdio_addr); | |
752 | mdio_delay(); | |
753 | } | |
754 | /* Read the two transition, 16 data, and wire-idle bits. */ | |
755 | for (i = 19; i > 0; i--) { | |
756 | iowrite8(MDIO_EnbIn, mdio_addr); | |
757 | mdio_delay(); | |
758 | retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0); | |
759 | iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); | |
760 | mdio_delay(); | |
761 | } | |
762 | return (retval>>1) & 0xffff; | |
763 | } | |
764 | ||
765 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value) | |
766 | { | |
767 | struct netdev_private *np = netdev_priv(dev); | |
768 | void __iomem *mdio_addr = np->base + MIICtrl; | |
769 | int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value; | |
770 | int i; | |
771 | ||
772 | if (np->mii_preamble_required) | |
773 | mdio_sync(mdio_addr); | |
774 | ||
775 | /* Shift the command bits out. */ | |
776 | for (i = 31; i >= 0; i--) { | |
777 | int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; | |
778 | ||
779 | iowrite8(dataval, mdio_addr); | |
780 | mdio_delay(); | |
781 | iowrite8(dataval | MDIO_ShiftClk, mdio_addr); | |
782 | mdio_delay(); | |
783 | } | |
784 | /* Clear out extra bits. */ | |
785 | for (i = 2; i > 0; i--) { | |
786 | iowrite8(MDIO_EnbIn, mdio_addr); | |
787 | mdio_delay(); | |
788 | iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); | |
789 | mdio_delay(); | |
790 | } | |
1da177e4 LT |
791 | } |
792 | ||
50500155 DN |
793 | static int mdio_wait_link(struct net_device *dev, int wait) |
794 | { | |
795 | int bmsr; | |
796 | int phy_id; | |
797 | struct netdev_private *np; | |
798 | ||
799 | np = netdev_priv(dev); | |
800 | phy_id = np->phys[0]; | |
801 | ||
802 | do { | |
803 | bmsr = mdio_read(dev, phy_id, MII_BMSR); | |
804 | if (bmsr & 0x0004) | |
805 | return 0; | |
806 | mdelay(1); | |
807 | } while (--wait > 0); | |
808 | return -1; | |
809 | } | |
810 | ||
1da177e4 LT |
811 | static int netdev_open(struct net_device *dev) |
812 | { | |
813 | struct netdev_private *np = netdev_priv(dev); | |
814 | void __iomem *ioaddr = np->base; | |
acd70c2b | 815 | unsigned long flags; |
1da177e4 LT |
816 | int i; |
817 | ||
818 | /* Do we need to reset the chip??? */ | |
819 | ||
a0607fd3 | 820 | i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); |
1da177e4 LT |
821 | if (i) |
822 | return i; | |
823 | ||
824 | if (netif_msg_ifup(np)) | |
825 | printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", | |
826 | dev->name, dev->irq); | |
827 | init_ring(dev); | |
828 | ||
829 | iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); | |
830 | /* The Tx list pointer is written as packets are queued. */ | |
831 | ||
832 | /* Initialize other registers. */ | |
833 | __set_mac_addr(dev); | |
834 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | |
835 | iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize); | |
836 | #else | |
837 | iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize); | |
838 | #endif | |
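/* That is: MTU + 14-byte Ethernet header, plus 4 extra bytes of room
   for a VLAN tag when 802.1Q support is configured. */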
839 | if (dev->mtu > 2047) | |
840 | iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl); | |
841 | ||
842 | /* Configure the PCI bus bursts and FIFO thresholds. */ | |
843 | ||
844 | if (dev->if_port == 0) | |
845 | dev->if_port = np->default_port; | |
846 | ||
847 | spin_lock_init(&np->mcastlock); | |
848 | ||
849 | set_rx_mode(dev); | |
850 | iowrite16(0, ioaddr + IntrEnable); | |
851 | iowrite16(0, ioaddr + DownCounter); | |
852 | /* Set the chip to poll every N*320nsec. */ | |
853 | iowrite8(100, ioaddr + RxDMAPollPeriod); | |
854 | iowrite8(127, ioaddr + TxDMAPollPeriod); | |
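/* In the 320 ns units mentioned above: 100 * 320 ns = 32 us for Rx, and
   127 * 320 ns is roughly 40.6 us for Tx (illustrative arithmetic). */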
855 | /* Fix DFE-580TX packet drop issue */ | |
44c10138 | 856 | if (np->pci_dev->revision >= 0x14) |
1da177e4 LT |
857 | iowrite8(0x01, ioaddr + DebugCtrl1); |
858 | netif_start_queue(dev); | |
859 | ||
acd70c2b JH |
860 | spin_lock_irqsave(&np->lock, flags); |
861 | reset_tx(dev); | |
862 | spin_unlock_irqrestore(&np->lock, flags); | |
863 | ||
1da177e4 LT |
864 | iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); |
865 | ||
866 | if (netif_msg_ifup(np)) | |
867 | printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x " | |
868 | "MAC Control %x, %4.4x %4.4x.\n", | |
869 | dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus), | |
870 | ioread32(ioaddr + MACCtrl0), | |
871 | ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0)); | |
872 | ||
873 | /* Set the timer to check for link beat. */ | |
874 | init_timer(&np->timer); | |
875 | np->timer.expires = jiffies + 3*HZ; | |
876 | np->timer.data = (unsigned long)dev; | |
877 | np->timer.function = &netdev_timer; /* timer handler */ | |
878 | add_timer(&np->timer); | |
879 | ||
880 | /* Enable interrupts by setting the interrupt mask. */ | |
881 | iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); | |
882 | ||
883 | return 0; | |
884 | } | |
885 | ||
886 | static void check_duplex(struct net_device *dev) | |
887 | { | |
888 | struct netdev_private *np = netdev_priv(dev); | |
889 | void __iomem *ioaddr = np->base; | |
890 | int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); | |
891 | int negotiated = mii_lpa & np->mii_if.advertising; | |
892 | int duplex; | |
893 | ||
894 | /* Force media */ | |
895 | if (!np->an_enable || mii_lpa == 0xffff) { | |
896 | if (np->mii_if.full_duplex) | |
897 | iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex, | |
898 | ioaddr + MACCtrl0); | |
899 | return; | |
900 | } | |
901 | ||
902 | /* Autonegotiation */ | |
903 | duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; | |
904 | if (np->mii_if.full_duplex != duplex) { | |
905 | np->mii_if.full_duplex = duplex; | |
906 | if (netif_msg_link(np)) | |
907 | printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d " | |
908 | "negotiated capability %4.4x.\n", dev->name, | |
909 | duplex ? "full" : "half", np->phys[0], negotiated); | |
62660e28 | 910 | iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0); |
1da177e4 LT |
911 | } |
912 | } | |
913 | ||
914 | static void netdev_timer(unsigned long data) | |
915 | { | |
916 | struct net_device *dev = (struct net_device *)data; | |
917 | struct netdev_private *np = netdev_priv(dev); | |
918 | void __iomem *ioaddr = np->base; | |
919 | int next_tick = 10*HZ; | |
920 | ||
921 | if (netif_msg_timer(np)) { | |
922 | printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, " | |
923 | "Tx %x Rx %x.\n", | |
924 | dev->name, ioread16(ioaddr + IntrEnable), | |
925 | ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus)); | |
926 | } | |
927 | check_duplex(dev); | |
928 | np->timer.expires = jiffies + next_tick; | |
929 | add_timer(&np->timer); | |
930 | } | |
931 | ||
932 | static void tx_timeout(struct net_device *dev) | |
933 | { | |
934 | struct netdev_private *np = netdev_priv(dev); | |
935 | void __iomem *ioaddr = np->base; | |
936 | unsigned long flag; | |
6aa20a22 | 937 | |
1da177e4 LT |
938 | netif_stop_queue(dev); |
939 | tasklet_disable(&np->tx_tasklet); | |
940 | iowrite16(0, ioaddr + IntrEnable); | |
941 | printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x " | |
942 | "TxFrameId %2.2x," | |
943 | " resetting...\n", dev->name, ioread8(ioaddr + TxStatus), | |
944 | ioread8(ioaddr + TxFrameId)); | |
945 | ||
946 | { | |
947 | int i; | |
948 | for (i=0; i<TX_RING_SIZE; i++) { | |
949 | printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i, | |
950 | (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)), | |
951 | le32_to_cpu(np->tx_ring[i].next_desc), | |
952 | le32_to_cpu(np->tx_ring[i].status), | |
953 | (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff, | |
6aa20a22 | 954 | le32_to_cpu(np->tx_ring[i].frag[0].addr), |
1da177e4 LT |
955 | le32_to_cpu(np->tx_ring[i].frag[0].length)); |
956 | } | |
6aa20a22 JG |
957 | printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n", |
958 | ioread32(np->base + TxListPtr), | |
1da177e4 | 959 | netif_queue_stopped(dev)); |
6aa20a22 | 960 | printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n", |
1da177e4 LT |
961 | np->cur_tx, np->cur_tx % TX_RING_SIZE, |
962 | np->dirty_tx, np->dirty_tx % TX_RING_SIZE); | |
963 | printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx); | |
964 | printk(KERN_DEBUG "cur_task=%d\n", np->cur_task); | |
965 | } | |
966 | spin_lock_irqsave(&np->lock, flag); | |
967 | ||
968 | /* Stop and restart the chip's Tx processes. */ |
969 | reset_tx(dev); | |
970 | spin_unlock_irqrestore(&np->lock, flag); | |
971 | ||
972 | dev->if_port = 0; | |
973 | ||
1ae5dc34 | 974 | dev->trans_start = jiffies; /* prevent tx timeout */ |
553e2335 | 975 | dev->stats.tx_errors++; |
1da177e4 LT |
976 | if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { |
977 | netif_wake_queue(dev); | |
978 | } | |
979 | iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); | |
980 | tasklet_enable(&np->tx_tasklet); | |
981 | } | |
982 | ||
983 | ||
984 | /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ | |
985 | static void init_ring(struct net_device *dev) | |
986 | { | |
987 | struct netdev_private *np = netdev_priv(dev); | |
988 | int i; | |
989 | ||
990 | np->cur_rx = np->cur_tx = 0; | |
991 | np->dirty_rx = np->dirty_tx = 0; | |
992 | np->cur_task = 0; | |
993 | ||
994 | np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16); | |
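/* That is, at least dev->mtu + 16 bytes of slack; PKT_BUF_SZ (1536)
   covers any MTU up to 1520. */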
995 | ||
996 | /* Initialize all Rx descriptors. */ | |
997 | for (i = 0; i < RX_RING_SIZE; i++) { | |
998 | np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma + | |
999 | ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring)); | |
1000 | np->rx_ring[i].status = 0; | |
1001 | np->rx_ring[i].frag[0].length = 0; | |
1002 | np->rx_skbuff[i] = NULL; | |
1003 | } | |
1004 | ||
1005 | /* Fill in the Rx buffers. Handle allocation failure gracefully. */ | |
1006 | for (i = 0; i < RX_RING_SIZE; i++) { | |
1007 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz); | |
1008 | np->rx_skbuff[i] = skb; | |
1009 | if (skb == NULL) | |
1010 | break; | |
1011 | skb->dev = dev; /* Mark as being used by this device. */ | |
1012 | skb_reserve(skb, 2); /* 16 byte align the IP header. */ | |
1013 | np->rx_ring[i].frag[0].addr = cpu_to_le32( | |
689be439 | 1014 | pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, |
1da177e4 LT |
1015 | PCI_DMA_FROMDEVICE)); |
1016 | np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag); | |
1017 | } | |
1018 | np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); | |
1019 | ||
1020 | for (i = 0; i < TX_RING_SIZE; i++) { | |
1021 | np->tx_skbuff[i] = NULL; | |
1022 | np->tx_ring[i].status = 0; | |
1023 | } | |
1da177e4 LT |
1024 | } |
1025 | ||
1026 | static void tx_poll (unsigned long data) | |
1027 | { | |
1028 | struct net_device *dev = (struct net_device *)data; | |
1029 | struct netdev_private *np = netdev_priv(dev); | |
1030 | unsigned head = np->cur_task % TX_RING_SIZE; | |
6aa20a22 | 1031 | struct netdev_desc *txdesc = |
1da177e4 | 1032 | &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE]; |
6aa20a22 | 1033 | |
1da177e4 LT |
1034 | /* Chain the next pointer */ |
1035 | for (; np->cur_tx - np->cur_task > 0; np->cur_task++) { | |
1036 | int entry = np->cur_task % TX_RING_SIZE; | |
1037 | txdesc = &np->tx_ring[entry]; | |
1038 | if (np->last_tx) { | |
1039 | np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma + | |
1040 | entry*sizeof(struct netdev_desc)); | |
1041 | } | |
1042 | np->last_tx = txdesc; | |
1043 | } | |
1044 | /* Indicate the latest descriptor of the tx ring */ |
1045 | txdesc->status |= cpu_to_le32(DescIntrOnTx); | |
1046 | ||
1047 | if (ioread32 (np->base + TxListPtr) == 0) | |
1048 | iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc), | |
1049 | np->base + TxListPtr); | |
1da177e4 LT |
1050 | } |
1051 | ||
61357325 | 1052 | static netdev_tx_t |
1da177e4 LT |
1053 | start_tx (struct sk_buff *skb, struct net_device *dev) |
1054 | { | |
1055 | struct netdev_private *np = netdev_priv(dev); | |
1056 | struct netdev_desc *txdesc; | |
1057 | unsigned entry; | |
1058 | ||
1059 | /* Calculate the next Tx descriptor entry. */ | |
1060 | entry = np->cur_tx % TX_RING_SIZE; | |
1061 | np->tx_skbuff[entry] = skb; | |
1062 | txdesc = &np->tx_ring[entry]; | |
1063 | ||
1064 | txdesc->next_desc = 0; | |
1065 | txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign); | |
1066 | txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data, | |
1067 | skb->len, | |
1068 | PCI_DMA_TODEVICE)); | |
1069 | txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag); | |
1070 | ||
1071 | /* Increment cur_tx before tasklet_schedule() */ | |
1072 | np->cur_tx++; | |
1073 | mb(); | |
1074 | /* Schedule a tx_poll() task */ | |
1075 | tasklet_schedule(&np->tx_tasklet); | |
1076 | ||
1077 | /* On some architectures: explicitly flush cache lines here. */ | |
8e95a202 JP |
1078 | if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 && |
1079 | !netif_queue_stopped(dev)) { | |
1da177e4 LT |
1080 | /* do nothing */ |
1081 | } else { | |
1082 | netif_stop_queue (dev); | |
1083 | } | |
1da177e4 LT |
1084 | if (netif_msg_tx_queued(np)) { |
1085 | printk (KERN_DEBUG | |
1086 | "%s: Transmit frame #%d queued in slot %d.\n", | |
1087 | dev->name, np->cur_tx, entry); | |
1088 | } | |
6ed10654 | 1089 | return NETDEV_TX_OK; |
1da177e4 LT |
1090 | } |
1091 | ||
1092 | /* Reset the hardware Tx logic and free all queued Tx buffers */ |
1093 | static int | |
1094 | reset_tx (struct net_device *dev) | |
1095 | { | |
1096 | struct netdev_private *np = netdev_priv(dev); | |
1097 | void __iomem *ioaddr = np->base; | |
1098 | struct sk_buff *skb; | |
1099 | int i; | |
1100 | int irq = in_interrupt(); | |
6aa20a22 | 1101 | |
1da177e4 LT |
1102 | /* Reset the Tx logic; TxListPtr will be cleared */ |
1103 | iowrite16 (TxDisable, ioaddr + MACCtrl1); | |
e714d99c PDM |
1104 | sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16); |
1105 | ||
1da177e4 LT |
1106 | /* free all tx skbuff */ |
1107 | for (i = 0; i < TX_RING_SIZE; i++) { | |
2109f89f JH |
1108 | np->tx_ring[i].next_desc = 0; |
1109 | ||
1da177e4 LT |
1110 | skb = np->tx_skbuff[i]; |
1111 | if (skb) { | |
6aa20a22 | 1112 | pci_unmap_single(np->pci_dev, |
14c9d9b0 AV |
1113 | le32_to_cpu(np->tx_ring[i].frag[0].addr), |
1114 | skb->len, PCI_DMA_TODEVICE); | |
1da177e4 LT |
1115 | if (irq) |
1116 | dev_kfree_skb_irq (skb); | |
1117 | else | |
1118 | dev_kfree_skb (skb); | |
1119 | np->tx_skbuff[i] = NULL; | |
553e2335 | 1120 | dev->stats.tx_dropped++; |
1da177e4 LT |
1121 | } |
1122 | } | |
1123 | np->cur_tx = np->dirty_tx = 0; | |
1124 | np->cur_task = 0; | |
2109f89f | 1125 | |
bca79eb7 | 1126 | np->last_tx = NULL; |
2109f89f JH |
1127 | iowrite8(127, ioaddr + TxDMAPollPeriod); |
1128 | ||
1da177e4 LT |
1129 | iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); |
1130 | return 0; | |
1131 | } | |
1132 | ||
6aa20a22 | 1133 | /* The interrupt handler cleans up after the Tx thread, |
1da177e4 | 1134 | and schedules the Rx thread work */ |
7d12e780 | 1135 | static irqreturn_t intr_handler(int irq, void *dev_instance) |
1da177e4 LT |
1136 | { |
1137 | struct net_device *dev = (struct net_device *)dev_instance; | |
1138 | struct netdev_private *np = netdev_priv(dev); | |
1139 | void __iomem *ioaddr = np->base; | |
1140 | int hw_frame_id; | |
1141 | int tx_cnt; | |
1142 | int tx_status; | |
1143 | int handled = 0; | |
e242040d | 1144 | int i; |
1da177e4 LT |
1145 | |
1146 | ||
1147 | do { | |
1148 | int intr_status = ioread16(ioaddr + IntrStatus); | |
1149 | iowrite16(intr_status, ioaddr + IntrStatus); | |
1150 | ||
1151 | if (netif_msg_intr(np)) | |
1152 | printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", | |
1153 | dev->name, intr_status); | |
1154 | ||
1155 | if (!(intr_status & DEFAULT_INTR)) | |
1156 | break; | |
1157 | ||
1158 | handled = 1; | |
1159 | ||
1160 | if (intr_status & (IntrRxDMADone)) { | |
1161 | iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone), | |
1162 | ioaddr + IntrEnable); | |
1163 | if (np->budget < 0) | |
1164 | np->budget = RX_BUDGET; | |
1165 | tasklet_schedule(&np->rx_tasklet); | |
1166 | } | |
1167 | if (intr_status & (IntrTxDone | IntrDrvRqst)) { | |
1168 | tx_status = ioread16 (ioaddr + TxStatus); | |
1169 | for (tx_cnt=32; tx_status & 0x80; --tx_cnt) { | |
1170 | if (netif_msg_tx_done(np)) | |
1171 | printk | |
1172 | ("%s: Transmit status is %2.2x.\n", | |
1173 | dev->name, tx_status); | |
1174 | if (tx_status & 0x1e) { | |
b71b95ef PDM |
1175 | if (netif_msg_tx_err(np)) |
1176 | printk("%s: Transmit error status %4.4x.\n", | |
1177 | dev->name, tx_status); | |
553e2335 | 1178 | dev->stats.tx_errors++; |
1da177e4 | 1179 | if (tx_status & 0x10) |
553e2335 | 1180 | dev->stats.tx_fifo_errors++; |
1da177e4 | 1181 | if (tx_status & 0x08) |
553e2335 | 1182 | dev->stats.collisions++; |
b71b95ef | 1183 | if (tx_status & 0x04) |
553e2335 | 1184 | dev->stats.tx_fifo_errors++; |
1da177e4 | 1185 | if (tx_status & 0x02) |
553e2335 | 1186 | dev->stats.tx_window_errors++; |
e242040d | 1187 | |
b71b95ef PDM |
1188 | /* |
1189 | ** This reset has been verified on | |
1190 | ** DFE-580TX boards ! phdm@macqel.be. | |
1191 | */ | |
1192 | if (tx_status & 0x10) { /* TxUnderrun */ | |
b71b95ef PDM |
1193 | /* Restart Tx FIFO and transmitter */ |
1194 | sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16); | |
b71b95ef | 1195 | /* No need to reset the Tx pointer here */ |
1da177e4 | 1196 | } |
2109f89f JH |
1197 | /* Restart the Tx. Need to make sure Tx is enabled */ |
1198 | i = 10; | |
1199 | do { | |
1200 | iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1); | |
1201 | if (ioread16(ioaddr + MACCtrl1) & TxEnabled) | |
1202 | break; | |
1203 | mdelay(1); | |
1204 | } while (--i); | |
1da177e4 LT |
1205 | } |
1206 | /* Yup, this is a documentation bug. It cost me *hours*. */ | |
1207 | iowrite16 (0, ioaddr + TxStatus); | |
1208 | if (tx_cnt < 0) { | |
1209 | iowrite32(5000, ioaddr + DownCounter); | |
1210 | break; | |
1211 | } | |
1212 | tx_status = ioread16 (ioaddr + TxStatus); | |
1213 | } | |
1214 | hw_frame_id = (tx_status >> 8) & 0xff; | |
1215 | } else { | |
1216 | hw_frame_id = ioread8(ioaddr + TxFrameId); | |
1217 | } | |
6aa20a22 | 1218 | |
44c10138 | 1219 | if (np->pci_dev->revision >= 0x14) { |
1da177e4 LT |
1220 | spin_lock(&np->lock); |
1221 | for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { | |
1222 | int entry = np->dirty_tx % TX_RING_SIZE; | |
1223 | struct sk_buff *skb; | |
1224 | int sw_frame_id; | |
1225 | sw_frame_id = (le32_to_cpu( | |
1226 | np->tx_ring[entry].status) >> 2) & 0xff; | |
1227 | if (sw_frame_id == hw_frame_id && | |
1228 | !(le32_to_cpu(np->tx_ring[entry].status) | |
1229 | & 0x00010000)) | |
1230 | break; | |
6aa20a22 | 1231 | if (sw_frame_id == (hw_frame_id + 1) % |
1da177e4 LT |
1232 | TX_RING_SIZE) |
1233 | break; | |
1234 | skb = np->tx_skbuff[entry]; | |
1235 | /* Free the original skb. */ | |
1236 | pci_unmap_single(np->pci_dev, | |
14c9d9b0 | 1237 | le32_to_cpu(np->tx_ring[entry].frag[0].addr), |
1da177e4 LT |
1238 | skb->len, PCI_DMA_TODEVICE); |
1239 | dev_kfree_skb_irq (np->tx_skbuff[entry]); | |
1240 | np->tx_skbuff[entry] = NULL; | |
1241 | np->tx_ring[entry].frag[0].addr = 0; | |
1242 | np->tx_ring[entry].frag[0].length = 0; | |
1243 | } | |
1244 | spin_unlock(&np->lock); | |
1245 | } else { | |
1246 | spin_lock(&np->lock); | |
1247 | for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { | |
1248 | int entry = np->dirty_tx % TX_RING_SIZE; | |
1249 | struct sk_buff *skb; | |
6aa20a22 | 1250 | if (!(le32_to_cpu(np->tx_ring[entry].status) |
1da177e4 LT |
1251 | & 0x00010000)) |
1252 | break; | |
1253 | skb = np->tx_skbuff[entry]; | |
1254 | /* Free the original skb. */ | |
1255 | pci_unmap_single(np->pci_dev, | |
14c9d9b0 | 1256 | le32_to_cpu(np->tx_ring[entry].frag[0].addr), |
1da177e4 LT |
1257 | skb->len, PCI_DMA_TODEVICE); |
1258 | dev_kfree_skb_irq (np->tx_skbuff[entry]); | |
1259 | np->tx_skbuff[entry] = NULL; | |
1260 | np->tx_ring[entry].frag[0].addr = 0; | |
1261 | np->tx_ring[entry].frag[0].length = 0; | |
1262 | } | |
1263 | spin_unlock(&np->lock); | |
1264 | } | |
6aa20a22 | 1265 | |
1da177e4 LT |
1266 | if (netif_queue_stopped(dev) && |
1267 | np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { | |
1268 | /* The ring is no longer full, clear busy flag. */ | |
1269 | netif_wake_queue (dev); | |
1270 | } | |
1271 | /* Abnormal error summary/uncommon events handlers. */ | |
1272 | if (intr_status & (IntrPCIErr | LinkChange | StatsMax)) | |
1273 | netdev_error(dev, intr_status); | |
1274 | } while (0); | |
1275 | if (netif_msg_intr(np)) | |
1276 | printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", | |
1277 | dev->name, ioread16(ioaddr + IntrStatus)); | |
1278 | return IRQ_RETVAL(handled); | |
1279 | } | |
1280 | ||
1281 | static void rx_poll(unsigned long data) | |
1282 | { | |
1283 | struct net_device *dev = (struct net_device *)data; | |
1284 | struct netdev_private *np = netdev_priv(dev); | |
1285 | int entry = np->cur_rx % RX_RING_SIZE; | |
1286 | int boguscnt = np->budget; | |
1287 | void __iomem *ioaddr = np->base; | |
1288 | int received = 0; | |
1289 | ||
1290 | /* If EOP is set on the next entry, it's a new packet. Send it up. */ | |
1291 | while (1) { | |
1292 | struct netdev_desc *desc = &(np->rx_ring[entry]); | |
1293 | u32 frame_status = le32_to_cpu(desc->status); | |
1294 | int pkt_len; | |
1295 | ||
1296 | if (--boguscnt < 0) { | |
1297 | goto not_done; | |
1298 | } | |
1299 | if (!(frame_status & DescOwn)) | |
1300 | break; | |
1301 | pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */ | |
1302 | if (netif_msg_rx_status(np)) | |
1303 | printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", | |
1304 | frame_status); | |
1305 | if (frame_status & 0x001f4000) { | |
1306 | /* There was an error. */ |
1307 | if (netif_msg_rx_err(np)) | |
1308 | printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n", | |
1309 | frame_status); | |
553e2335 ED |
1310 | dev->stats.rx_errors++; |
1311 | if (frame_status & 0x00100000) | |
1312 | dev->stats.rx_length_errors++; | |
1313 | if (frame_status & 0x00010000) | |
1314 | dev->stats.rx_fifo_errors++; | |
1315 | if (frame_status & 0x00060000) | |
1316 | dev->stats.rx_frame_errors++; | |
1317 | if (frame_status & 0x00080000) | |
1318 | dev->stats.rx_crc_errors++; | |
1da177e4 LT |
1319 | if (frame_status & 0x00100000) { |
1320 | printk(KERN_WARNING "%s: Oversized Ethernet frame," | |
1321 | " status %8.8x.\n", | |
1322 | dev->name, frame_status); | |
1323 | } | |
1324 | } else { | |
1325 | struct sk_buff *skb; | |
1326 | #ifndef final_version | |
1327 | if (netif_msg_rx_status(np)) | |
1328 | printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" | |
1329 | ", bogus_cnt %d.\n", | |
1330 | pkt_len, boguscnt); | |
1331 | #endif | |
1332 | /* Check if the packet is long enough to accept without copying | |
1333 | to a minimally-sized skbuff. */ | |
8e95a202 JP |
1334 | if (pkt_len < rx_copybreak && |
1335 | (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | |
1da177e4 LT |
1336 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1337 | pci_dma_sync_single_for_cpu(np->pci_dev, | |
14c9d9b0 | 1338 | le32_to_cpu(desc->frag[0].addr), |
1da177e4 LT |
1339 | np->rx_buf_sz, |
1340 | PCI_DMA_FROMDEVICE); | |
1341 | ||
8c7b7faa | 1342 | skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); |
1da177e4 | 1343 | pci_dma_sync_single_for_device(np->pci_dev, |
14c9d9b0 | 1344 | le32_to_cpu(desc->frag[0].addr), |
1da177e4 LT |
1345 | np->rx_buf_sz, |
1346 | PCI_DMA_FROMDEVICE); | |
1347 | skb_put(skb, pkt_len); | |
1348 | } else { | |
1349 | pci_unmap_single(np->pci_dev, | |
14c9d9b0 | 1350 | le32_to_cpu(desc->frag[0].addr), |
1da177e4 LT |
1351 | np->rx_buf_sz, |
1352 | PCI_DMA_FROMDEVICE); | |
1353 | skb_put(skb = np->rx_skbuff[entry], pkt_len); | |
1354 | np->rx_skbuff[entry] = NULL; | |
1355 | } | |
1356 | skb->protocol = eth_type_trans(skb, dev); | |
1357 | /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */ | |
1358 | netif_rx(skb); | |
1da177e4 LT |
1359 | } |
1360 | entry = (entry + 1) % RX_RING_SIZE; | |
1361 | received++; | |
1362 | } | |
1363 | np->cur_rx = entry; | |
1364 | refill_rx (dev); | |
1365 | np->budget -= received; | |
1366 | iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); | |
1367 | return; | |
1368 | ||
1369 | not_done: | |
1370 | np->cur_rx = entry; | |
1371 | refill_rx (dev); | |
1372 | if (!received) | |
1373 | received = 1; | |
1374 | np->budget -= received; | |
1375 | if (np->budget <= 0) | |
1376 | np->budget = RX_BUDGET; | |
1377 | tasklet_schedule(&np->rx_tasklet); | |
1da177e4 LT |
1378 | } |
1379 | ||
1380 | static void refill_rx (struct net_device *dev) | |
1381 | { | |
1382 | struct netdev_private *np = netdev_priv(dev); | |
1383 | int entry; | |
1384 | int cnt = 0; | |
1385 | ||
1386 | /* Refill the Rx ring buffers. */ | |
1387 | for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0; | |
1388 | np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) { | |
1389 | struct sk_buff *skb; | |
1390 | entry = np->dirty_rx % RX_RING_SIZE; | |
1391 | if (np->rx_skbuff[entry] == NULL) { | |
1392 | skb = dev_alloc_skb(np->rx_buf_sz); | |
1393 | np->rx_skbuff[entry] = skb; | |
1394 | if (skb == NULL) | |
1395 | break; /* Better luck next round. */ | |
1396 | skb->dev = dev; /* Mark as being used by this device. */ | |
1397 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | |
1398 | np->rx_ring[entry].frag[0].addr = cpu_to_le32( | |
689be439 | 1399 | pci_map_single(np->pci_dev, skb->data, |
1da177e4 LT |
1400 | np->rx_buf_sz, PCI_DMA_FROMDEVICE)); |
1401 | } | |
1402 | /* Perhaps we need not reset this field. */ | |
1403 | np->rx_ring[entry].frag[0].length = | |
1404 | cpu_to_le32(np->rx_buf_sz | LastFrag); | |
1405 | np->rx_ring[entry].status = 0; | |
1406 | cnt++; | |
1407 | } | |
1da177e4 LT |
1408 | } |
1409 | static void netdev_error(struct net_device *dev, int intr_status) | |
1410 | { | |
1411 | struct netdev_private *np = netdev_priv(dev); | |
1412 | void __iomem *ioaddr = np->base; | |
1413 | u16 mii_ctl, mii_advertise, mii_lpa; | |
1414 | int speed; | |
1415 | ||
1416 | if (intr_status & LinkChange) { | |
50500155 DN |
1417 | if (mdio_wait_link(dev, 10) == 0) { |
1418 | printk(KERN_INFO "%s: Link up\n", dev->name); | |
1419 | if (np->an_enable) { | |
1420 | mii_advertise = mdio_read(dev, np->phys[0], | |
1421 | MII_ADVERTISE); | |
1422 | mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); | |
1423 | mii_advertise &= mii_lpa; | |
1424 | printk(KERN_INFO "%s: Link changed: ", | |
1425 | dev->name); | |
1426 | if (mii_advertise & ADVERTISE_100FULL) { | |
1427 | np->speed = 100; | |
1428 | printk("100Mbps, full duplex\n"); | |
1429 | } else if (mii_advertise & ADVERTISE_100HALF) { | |
1430 | np->speed = 100; | |
1431 | printk("100Mbps, half duplex\n"); | |
1432 | } else if (mii_advertise & ADVERTISE_10FULL) { | |
1433 | np->speed = 10; | |
1434 | printk("10Mbps, full duplex\n"); | |
1435 | } else if (mii_advertise & ADVERTISE_10HALF) { | |
1436 | np->speed = 10; | |
1437 | printk("10Mbps, half duplex\n"); | |
1438 | } else | |
1439 | printk("\n"); | |
1da177e4 | 1440 | |
50500155 DN |
1441 | } else { |
1442 | mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR); | |
1443 | speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10; | |
1444 | np->speed = speed; | |
1445 | printk(KERN_INFO "%s: Link changed: %dMbps, ", |
1446 | dev->name, speed); | |
1447 | printk("%s duplex.\n", | |
1448 | (mii_ctl & BMCR_FULLDPLX) ? | |
1449 | "full" : "half"); | |
1450 | } | |
1451 | check_duplex(dev); | |
1452 | if (np->flowctrl && np->mii_if.full_duplex) { | |
1453 | iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200, | |
1454 | ioaddr + MulticastFilter1+2); | |
1455 | iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl, | |
1456 | ioaddr + MACCtrl0); | |
1457 | } | |
1458 | netif_carrier_on(dev); | |
1da177e4 | 1459 | } else { |
50500155 DN |
1460 | printk(KERN_INFO "%s: Link down\n", dev->name); |
1461 | netif_carrier_off(dev); | |
1da177e4 LT |
1462 | } |
1463 | } | |
1464 | if (intr_status & StatsMax) { | |
1465 | get_stats(dev); | |
1466 | } | |
1467 | if (intr_status & IntrPCIErr) { | |
1468 | printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", | |
1469 | dev->name, intr_status); | |
1470 | /* We must do a global reset of DMA to continue. */ | |
1471 | } | |
1472 | } | |
1473 | ||
1474 | static struct net_device_stats *get_stats(struct net_device *dev) | |
1475 | { | |
1476 | struct netdev_private *np = netdev_priv(dev); | |
1477 | void __iomem *ioaddr = np->base; | |
1478 | int i; | |
1479 | ||
1480 | /* We should lock this segment of code for SMP eventually, although | |
1481 | the vulnerability window is very small and statistics are | |
1482 | non-critical. */ | |
1483 | /* The chip only needs to report frames that were silently dropped. */ |
553e2335 ED |
1484 | dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed); |
1485 | dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK); | |
1486 | dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK); | |
1487 | dev->stats.collisions += ioread8(ioaddr + StatsLateColl); | |
1488 | dev->stats.collisions += ioread8(ioaddr + StatsMultiColl); | |
1489 | dev->stats.collisions += ioread8(ioaddr + StatsOneColl); | |
1490 | dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError); | |
1da177e4 LT |
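 | /* The remaining counters are read here but not accumulated; reading | 
 | them presumably clears the on-chip statistics registers. */ | 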
1491 | ioread8(ioaddr + StatsTxDefer); |
1492 | for (i = StatsTxDefer; i <= StatsMcastRx; i++) | |
1493 | ioread8(ioaddr + i); | |
553e2335 ED |
1494 | dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow); |
1495 | dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16; | |
1496 | dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow); | |
1497 | dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16; | |
1da177e4 | 1498 | |
553e2335 | 1499 | return &dev->stats; |
1da177e4 LT |
1500 | } |
1501 | ||
1502 | static void set_rx_mode(struct net_device *dev) | |
1503 | { | |
1504 | struct netdev_private *np = netdev_priv(dev); | |
1505 | void __iomem *ioaddr = np->base; | |
1506 | u16 mc_filter[4]; /* Multicast hash filter */ | |
1507 | u32 rx_mode; | |
1508 | int i; | |
1509 | ||
1510 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ | |
1da177e4 LT |
1511 | memset(mc_filter, 0xff, sizeof(mc_filter)); |
1512 | rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys; | |
4cd24eaf | 1513 | } else if ((netdev_mc_count(dev) > multicast_filter_limit) || |
8e95a202 | 1514 | (dev->flags & IFF_ALLMULTI)) { |
1da177e4 LT |
1515 | /* Too many to match, or accept all multicasts. */ |
1516 | memset(mc_filter, 0xff, sizeof(mc_filter)); | |
1517 | rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; | |
4cd24eaf | 1518 | } else if (!netdev_mc_empty(dev)) { |
22bedad3 | 1519 | struct netdev_hw_addr *ha; |
1da177e4 LT |
1520 | int bit; |
1521 | int index; | |
1522 | int crc; | |
1523 | memset (mc_filter, 0, sizeof (mc_filter)); | |
22bedad3 JP |
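 | /* Hash each multicast address with the little-endian Ethernet CRC; | 
 | the top six bits of the CRC select one of the 64 filter bits held | 
 | in the four 16-bit mc_filter words. */ | 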
1524 | netdev_for_each_mc_addr(ha, dev) { |
1525 | crc = ether_crc_le(ETH_ALEN, ha->addr); | |
1da177e4 LT |
1526 | for (index=0, bit=0; bit < 6; bit++, crc <<= 1) |
1527 | if (crc & 0x80000000) index |= 1 << bit; | |
1528 | mc_filter[index/16] |= (1 << (index % 16)); | |
1529 | } | |
1530 | rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys; | |
1531 | } else { | |
1532 | iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode); | |
1533 | return; | |
1534 | } | |
1535 | if (np->mii_if.full_duplex && np->flowctrl) | |
1536 | mc_filter[3] |= 0x0200; | |
1537 | ||
1538 | for (i = 0; i < 4; i++) | |
1539 | iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2); | |
1540 | iowrite8(rx_mode, ioaddr + RxMode); | |
1541 | } | |
1542 | ||
1543 | static int __set_mac_addr(struct net_device *dev) | |
1544 | { | |
1545 | struct netdev_private *np = netdev_priv(dev); | |
1546 | u16 addr16; | |
1547 | ||
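 | /* Program the 6-byte station address as three little-endian 16-bit | 
 | words at StationAddr, StationAddr+2 and StationAddr+4. */ | 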
1548 | addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8)); | |
1549 | iowrite16(addr16, np->base + StationAddr); | |
1550 | addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8)); | |
1551 | iowrite16(addr16, np->base + StationAddr+2); | |
1552 | addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8)); | |
1553 | iowrite16(addr16, np->base + StationAddr+4); | |
1554 | return 0; | |
1555 | } | |
1556 | ||
1557 | static int check_if_running(struct net_device *dev) | |
1558 | { | |
1559 | if (!netif_running(dev)) | |
1560 | return -EINVAL; | |
1561 | return 0; | |
1562 | } | |
1563 | ||
1564 | static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | |
1565 | { | |
1566 | struct netdev_private *np = netdev_priv(dev); | |
1567 | strcpy(info->driver, DRV_NAME); | |
1568 | strcpy(info->version, DRV_VERSION); | |
1569 | strcpy(info->bus_info, pci_name(np->pci_dev)); | |
1570 | } | |
1571 | ||
1572 | static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |
1573 | { | |
1574 | struct netdev_private *np = netdev_priv(dev); | |
1575 | spin_lock_irq(&np->lock); | |
1576 | mii_ethtool_gset(&np->mii_if, ecmd); | |
1577 | spin_unlock_irq(&np->lock); | |
1578 | return 0; | |
1579 | } | |
1580 | ||
1581 | static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |
1582 | { | |
1583 | struct netdev_private *np = netdev_priv(dev); | |
1584 | int res; | |
1585 | spin_lock_irq(&np->lock); | |
1586 | res = mii_ethtool_sset(&np->mii_if, ecmd); | |
1587 | spin_unlock_irq(&np->lock); | |
1588 | return res; | |
1589 | } | |
1590 | ||
1591 | static int nway_reset(struct net_device *dev) | |
1592 | { | |
1593 | struct netdev_private *np = netdev_priv(dev); | |
1594 | return mii_nway_restart(&np->mii_if); | |
1595 | } | |
1596 | ||
1597 | static u32 get_link(struct net_device *dev) | |
1598 | { | |
1599 | struct netdev_private *np = netdev_priv(dev); | |
1600 | return mii_link_ok(&np->mii_if); | |
1601 | } | |
1602 | ||
1603 | static u32 get_msglevel(struct net_device *dev) | |
1604 | { | |
1605 | struct netdev_private *np = netdev_priv(dev); | |
1606 | return np->msg_enable; | |
1607 | } | |
1608 | ||
1609 | static void set_msglevel(struct net_device *dev, u32 val) | |
1610 | { | |
1611 | struct netdev_private *np = netdev_priv(dev); | |
1612 | np->msg_enable = val; | |
1613 | } | |
1614 | ||
7282d491 | 1615 | static const struct ethtool_ops ethtool_ops = { |
1da177e4 LT |
1616 | .begin = check_if_running, |
1617 | .get_drvinfo = get_drvinfo, | |
1618 | .get_settings = get_settings, | |
1619 | .set_settings = set_settings, | |
1620 | .nway_reset = nway_reset, | |
1621 | .get_link = get_link, | |
1622 | .get_msglevel = get_msglevel, | |
1623 | .set_msglevel = set_msglevel, | |
1624 | }; | |
1625 | ||
1626 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
1627 | { | |
1628 | struct netdev_private *np = netdev_priv(dev); | |
1da177e4 | 1629 | int rc; |
1da177e4 LT |
1630 | |
1631 | if (!netif_running(dev)) | |
1632 | return -EINVAL; | |
1633 | ||
1634 | spin_lock_irq(&np->lock); | |
1635 | rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL); | |
1636 | spin_unlock_irq(&np->lock); | |
1da177e4 LT |
1637 | |
1638 | return rc; | |
1639 | } | |
1640 | ||
1641 | static int netdev_close(struct net_device *dev) | |
1642 | { | |
1643 | struct netdev_private *np = netdev_priv(dev); | |
1644 | void __iomem *ioaddr = np->base; | |
1645 | struct sk_buff *skb; | |
1646 | int i; | |
1647 | ||
31f817e9 JH |
1648 | /* Wait for any running tasklets to finish, then kill them */ |
1649 | tasklet_kill(&np->rx_tasklet); | |
1650 | tasklet_kill(&np->tx_tasklet); | |
1651 | np->cur_tx = 0; | |
1652 | np->dirty_tx = 0; | |
1653 | np->cur_task = 0; | |
bca79eb7 | 1654 | np->last_tx = NULL; |
31f817e9 | 1655 | |
1da177e4 LT |
1656 | netif_stop_queue(dev); |
1657 | ||
1658 | if (netif_msg_ifdown(np)) { | |
1659 | printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x " | |
1660 | "Rx %4.4x Int %2.2x.\n", | |
1661 | dev->name, ioread8(ioaddr + TxStatus), | |
1662 | ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus)); | |
1663 | printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", | |
1664 | dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); | |
1665 | } | |
1666 | ||
1667 | /* Disable interrupts by clearing the interrupt mask. */ | |
1668 | iowrite16(0x0000, ioaddr + IntrEnable); | |
1669 | ||
acd70c2b JH |
1670 | /* Disable Rx and Tx DMA so resources can be released safely */ |
1671 | iowrite32(0x500, ioaddr + DMACtrl); | |
1672 | ||
1da177e4 LT |
1673 | /* Stop the chip's Tx and Rx processes. */ |
1674 | iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1); | |
1675 | ||
31f817e9 JH |
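 | /* Wait up to ~2 seconds for DMACtrl bits 0xc000 to clear, which | 
 | presumably indicates the Tx/Rx DMA engines have stopped. */ | 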
1676 | for (i = 2000; i > 0; i--) { |
1677 | if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0) | |
1678 | break; | |
1679 | mdelay(1); | |
1680 | } | |
1681 | ||
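 | /* Issue a full reset of the DMA engines, FIFOs and network logic, | 
 | then poll ResetBusy for up to ~2 seconds until it completes. */ | 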
1682 | iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset, | |
1683 | ioaddr + ASICCtrl + 2); |
1684 | ||
1685 | for (i = 2000; i > 0; i--) { | |
1686 | if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0) |
1687 | break; | |
1688 | mdelay(1); | |
1689 | } | |
1da177e4 LT |
1690 | |
1691 | #ifdef __i386__ | |
1692 | if (netif_msg_hw(np)) { | |
ad361c98 | 1693 | printk(KERN_DEBUG " Tx ring at %8.8x:\n", |
1da177e4 LT |
1694 | (int)(np->tx_ring_dma)); |
1695 | for (i = 0; i < TX_RING_SIZE; i++) | |
ad361c98 | 1696 | printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n", |
1da177e4 LT |
1697 | i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr, |
1698 | np->tx_ring[i].frag[0].length); | |
ad361c98 | 1699 | printk(KERN_DEBUG " Rx ring %8.8x:\n", |
1da177e4 LT |
1700 | (int)(np->rx_ring_dma)); |
1701 | for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) { | |
1702 | printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", | |
1703 | i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr, | |
1704 | np->rx_ring[i].frag[0].length); | |
1705 | } | |
1706 | } | |
1707 | #endif /* __i386__ debugging only */ | |
1708 | ||
1709 | free_irq(dev->irq, dev); | |
1710 | ||
1711 | del_timer_sync(&np->timer); | |
1712 | ||
1713 | /* Free all the skbuffs in the Rx queue. */ | |
1714 | for (i = 0; i < RX_RING_SIZE; i++) { | |
1715 | np->rx_ring[i].status = 0; | |
1da177e4 LT |
1716 | skb = np->rx_skbuff[i]; |
1717 | if (skb) { | |
1718 | pci_unmap_single(np->pci_dev, | |
14c9d9b0 AV |
1719 | le32_to_cpu(np->rx_ring[i].frag[0].addr), |
1720 | np->rx_buf_sz, PCI_DMA_FROMDEVICE); | |
1da177e4 LT |
1721 | dev_kfree_skb(skb); |
1722 | np->rx_skbuff[i] = NULL; | |
1723 | } | |
14c9d9b0 | 1724 | np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */ |
1da177e4 LT |
1725 | } |
1726 | for (i = 0; i < TX_RING_SIZE; i++) { | |
31f817e9 | 1727 | np->tx_ring[i].next_desc = 0; |
1da177e4 LT |
1728 | skb = np->tx_skbuff[i]; |
1729 | if (skb) { | |
1730 | pci_unmap_single(np->pci_dev, | |
14c9d9b0 AV |
1731 | le32_to_cpu(np->tx_ring[i].frag[0].addr), |
1732 | skb->len, PCI_DMA_TODEVICE); | |
1da177e4 LT |
1733 | dev_kfree_skb(skb); |
1734 | np->tx_skbuff[i] = NULL; | |
1735 | } | |
1736 | } | |
1737 | ||
1738 | return 0; | |
1739 | } | |
1740 | ||
1741 | static void __devexit sundance_remove1 (struct pci_dev *pdev) | |
1742 | { | |
1743 | struct net_device *dev = pci_get_drvdata(pdev); | |
1744 | ||
1745 | if (dev) { | |
1746 | struct netdev_private *np = netdev_priv(dev); | |
1747 | ||
1748 | unregister_netdev(dev); | |
1749 | pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, | |
1750 | np->rx_ring_dma); | |
1751 | pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, | |
1752 | np->tx_ring_dma); | |
1753 | pci_iounmap(pdev, np->base); | |
1754 | pci_release_regions(pdev); | |
1755 | free_netdev(dev); | |
1756 | pci_set_drvdata(pdev, NULL); | |
1757 | } | |
1758 | } | |
1759 | ||
1760 | static struct pci_driver sundance_driver = { | |
1761 | .name = DRV_NAME, | |
1762 | .id_table = sundance_pci_tbl, | |
1763 | .probe = sundance_probe1, | |
1764 | .remove = __devexit_p(sundance_remove1), | |
1765 | }; | |
1766 | ||
1767 | static int __init sundance_init(void) | |
1768 | { | |
1769 | /* When built as a module, this is printed whether or not devices are found in probe */ |
1770 | #ifdef MODULE | |
1771 | printk(version); | |
1772 | #endif | |
29917620 | 1773 | return pci_register_driver(&sundance_driver); |
1da177e4 LT |
1774 | } |
1775 | ||
1776 | static void __exit sundance_exit(void) | |
1777 | { | |
1778 | pci_unregister_driver(&sundance_driver); | |
1779 | } | |
1780 | ||
1781 | module_init(sundance_init); | |
1782 | module_exit(sundance_exit); | |
1783 | ||
1784 |