Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */ |
2 | /* | |
3 | Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com> | |
4 | ||
5 | Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c] | |
6 | Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c] | |
7 | Copyright 2001 Manfred Spraul [natsemi.c] | |
8 | Copyright 1999-2001 by Donald Becker. [natsemi.c] | |
9 | Written 1997-2001 by Donald Becker. [8139too.c] | |
10 | Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c] | |
11 | ||
12 | This software may be used and distributed according to the terms of | |
13 | the GNU General Public License (GPL), incorporated herein by reference. | |
14 | Drivers based on or derived from this code fall under the GPL and must | |
15 | retain the authorship, copyright and license notice. This file is not | |
16 | a complete program and may only be used when the entire operating | |
17 | system is licensed under the GPL. | |
18 | ||
19 | See the file COPYING in this distribution for more information. | |
20 | ||
21 | Contributors: | |
22 | ||
23 | Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br> | |
24 | PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br> | |
25 | LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br> | |
26 | ||
27 | TODO: | |
28 | * Test Tx checksumming thoroughly | |
29 | * Implement dev->tx_timeout | |
30 | ||
31 | Low priority TODO: | |
32 | * Complete reset on PciErr | |
33 | * Consider Rx interrupt mitigation using TimerIntr | |
34 | * Investigate using skb->priority with h/w VLAN priority | |
35 | * Investigate using High Priority Tx Queue with skb->priority | |
36 | * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error | |
37 | * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error | |
38 | * Implement Tx software interrupt mitigation via | |
39 | Tx descriptor bit | |
40 | * The real minimum of CP_MIN_MTU is 4 bytes. However, | |
41 | for this to be supported, one must(?) turn on packet padding. | |
42 | * Support external MII transceivers (patch available) | |
43 | ||
44 | NOTES: | |
45 | * TX checksumming is considered experimental. It is off by | |
46 | default, use ethtool to turn it on. | |
47 | ||
48 | */ | |
49 | ||
50 | #define DRV_NAME "8139cp" | |
51 | #define DRV_VERSION "1.2" | |
52 | #define DRV_RELDATE "Mar 22, 2004" | |
53 | ||
54 | ||
55 | #include <linux/config.h> | |
56 | #include <linux/module.h> | |
57 | #include <linux/kernel.h> | |
58 | #include <linux/compiler.h> | |
59 | #include <linux/netdevice.h> | |
60 | #include <linux/etherdevice.h> | |
61 | #include <linux/init.h> | |
62 | #include <linux/pci.h> | |
63 | #include <linux/delay.h> | |
64 | #include <linux/ethtool.h> | |
65 | #include <linux/mii.h> | |
66 | #include <linux/if_vlan.h> | |
67 | #include <linux/crc32.h> | |
68 | #include <linux/in.h> | |
69 | #include <linux/ip.h> | |
70 | #include <linux/tcp.h> | |
71 | #include <linux/udp.h> | |
72 | #include <linux/cache.h> | |
73 | #include <asm/io.h> | |
74 | #include <asm/irq.h> | |
75 | #include <asm/uaccess.h> | |
76 | ||
77 | /* VLAN tagging feature enable/disable */ | |
78 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | |
79 | #define CP_VLAN_TAG_USED 1 | |
80 | #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \ | |
81 | do { (tx_desc)->opts2 = (vlan_tag_value); } while (0) | |
82 | #else | |
83 | #define CP_VLAN_TAG_USED 0 | |
84 | #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \ | |
85 | do { (tx_desc)->opts2 = 0; } while (0) | |
86 | #endif | |
87 | ||
88 | /* These identify the driver base version and may not be removed. */ | |
89 | static char version[] = | |
90 | KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; | |
91 | ||
92 | MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); | |
93 | MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver"); | |
94 | MODULE_LICENSE("GPL"); | |
95 | ||
96 | static int debug = -1; | |
97 | MODULE_PARM (debug, "i"); | |
98 | MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number"); | |
99 | ||
100 | /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). | |
101 | The RTL chips use a 64 element hash table based on the Ethernet CRC. */ | |
102 | static int multicast_filter_limit = 32; | |
103 | MODULE_PARM (multicast_filter_limit, "i"); | |
104 | MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses"); | |
105 | ||
106 | #define PFX DRV_NAME ": " | |
107 | ||
108 | #ifndef TRUE | |
109 | #define FALSE 0 | |
110 | #define TRUE (!FALSE) | |
111 | #endif | |
112 | ||
113 | #define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ | |
114 | NETIF_MSG_PROBE | \ | |
115 | NETIF_MSG_LINK) | |
116 | #define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */ | |
117 | #define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */ | |
118 | #define CP_REGS_SIZE (0xff + 1) | |
119 | #define CP_REGS_VER 1 /* version 1 */ | |
120 | #define CP_RX_RING_SIZE 64 | |
121 | #define CP_TX_RING_SIZE 64 | |
122 | #define CP_RING_BYTES \ | |
123 | ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \ | |
124 | (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \ | |
125 | CP_STATS_SIZE) | |
126 | #define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1)) | |
127 | #define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1)) | |
128 | #define TX_BUFFS_AVAIL(CP) \ | |
129 | (((CP)->tx_tail <= (CP)->tx_head) ? \ | |
130 | (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \ | |
131 | (CP)->tx_tail - (CP)->tx_head - 1) | |
132 | ||
133 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ | |
134 | #define RX_OFFSET 2 | |
135 | #define CP_INTERNAL_PHY 32 | |
136 | ||
137 | /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ | |
138 | #define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */ | |
139 | #define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */ | |
140 | #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ | |
141 | #define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */ | |
142 | ||
143 | /* Time in jiffies before concluding the transmitter is hung. */ | |
144 | #define TX_TIMEOUT (6*HZ) | |
145 | ||
146 | /* hardware minimum and maximum for a single frame's data payload */ | |
147 | #define CP_MIN_MTU 60 /* TODO: allow lower, but pad */ | |
148 | #define CP_MAX_MTU 4096 | |
149 | ||
150 | enum { | |
151 | /* NIC register offsets */ | |
152 | MAC0 = 0x00, /* Ethernet hardware address. */ | |
153 | MAR0 = 0x08, /* Multicast filter. */ | |
154 | StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */ | |
155 | TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */ | |
156 | HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */ | |
157 | Cmd = 0x37, /* Command register */ | |
158 | IntrMask = 0x3C, /* Interrupt mask */ | |
159 | IntrStatus = 0x3E, /* Interrupt status */ | |
160 | TxConfig = 0x40, /* Tx configuration */ | |
161 | ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */ | |
162 | RxConfig = 0x44, /* Rx configuration */ | |
163 | RxMissed = 0x4C, /* 24 bits valid, write clears */ | |
164 | Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */ | |
165 | Config1 = 0x52, /* Config1 */ | |
166 | Config3 = 0x59, /* Config3 */ | |
167 | Config4 = 0x5A, /* Config4 */ | |
168 | MultiIntr = 0x5C, /* Multiple interrupt select */ | |
169 | BasicModeCtrl = 0x62, /* MII BMCR */ | |
170 | BasicModeStatus = 0x64, /* MII BMSR */ | |
171 | NWayAdvert = 0x66, /* MII ADVERTISE */ | |
172 | NWayLPAR = 0x68, /* MII LPA */ | |
173 | NWayExpansion = 0x6A, /* MII Expansion */ | |
174 | Config5 = 0xD8, /* Config5 */ | |
175 | TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */ | |
176 | RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */ | |
177 | CpCmd = 0xE0, /* C+ Command register (C+ mode only) */ | |
178 | IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */ | |
179 | RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */ | |
180 | TxThresh = 0xEC, /* Early Tx threshold */ | |
181 | OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */ | |
182 | OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */ | |
183 | ||
184 | /* Tx and Rx status descriptors */ | |
185 | DescOwn = (1 << 31), /* Descriptor is owned by NIC */ | |
186 | RingEnd = (1 << 30), /* End of descriptor ring */ | |
187 | FirstFrag = (1 << 29), /* First segment of a packet */ | |
188 | LastFrag = (1 << 28), /* Final segment of a packet */ | |
189 | TxError = (1 << 23), /* Tx error summary */ | |
190 | RxError = (1 << 20), /* Rx error summary */ | |
191 | IPCS = (1 << 18), /* Calculate IP checksum */ | |
192 | UDPCS = (1 << 17), /* Calculate UDP/IP checksum */ | |
193 | TCPCS = (1 << 16), /* Calculate TCP/IP checksum */ | |
194 | TxVlanTag = (1 << 17), /* Add VLAN tag */ | |
195 | RxVlanTagged = (1 << 16), /* Rx VLAN tag available */ | |
196 | IPFail = (1 << 15), /* IP checksum failed */ | |
197 | UDPFail = (1 << 14), /* UDP/IP checksum failed */ | |
198 | TCPFail = (1 << 13), /* TCP/IP checksum failed */ | |
199 | NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */ | |
200 | PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */ | |
201 | PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */ | |
202 | RxProtoTCP = 1, | |
203 | RxProtoUDP = 2, | |
204 | RxProtoIP = 3, | |
205 | TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */ | |
206 | TxOWC = (1 << 22), /* Tx Out-of-window collision */ | |
207 | TxLinkFail = (1 << 21), /* Link failed during Tx of packet */ | |
208 | TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */ | |
209 | TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */ | |
210 | TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */ | |
211 | RxErrFrame = (1 << 27), /* Rx frame alignment error */ | |
212 | RxMcast = (1 << 26), /* Rx multicast packet rcv'd */ | |
213 | RxErrCRC = (1 << 18), /* Rx CRC error */ | |
214 | RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */ | |
215 | RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */ | |
216 | RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */ | |
217 | ||
218 | /* StatsAddr register */ | |
219 | DumpStats = (1 << 3), /* Begin stats dump */ | |
220 | ||
221 | /* RxConfig register */ | |
222 | RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */ | |
223 | RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */ | |
224 | AcceptErr = 0x20, /* Accept packets with CRC errors */ | |
225 | AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */ | |
226 | AcceptBroadcast = 0x08, /* Accept broadcast packets */ | |
227 | AcceptMulticast = 0x04, /* Accept multicast packets */ | |
228 | AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */ | |
229 | AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */ | |
230 | ||
231 | /* IntrMask / IntrStatus registers */ | |
232 | PciErr = (1 << 15), /* System error on the PCI bus */ | |
233 | TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */ | |
234 | LenChg = (1 << 13), /* Cable length change */ | |
235 | SWInt = (1 << 8), /* Software-requested interrupt */ | |
236 | TxEmpty = (1 << 7), /* No Tx descriptors available */ | |
237 | RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */ | |
238 | LinkChg = (1 << 5), /* Packet underrun, or link change */ | |
239 | RxEmpty = (1 << 4), /* No Rx descriptors available */ | |
240 | TxErr = (1 << 3), /* Tx error */ | |
241 | TxOK = (1 << 2), /* Tx packet sent */ | |
242 | RxErr = (1 << 1), /* Rx error */ | |
243 | RxOK = (1 << 0), /* Rx packet received */ | |
244 | IntrResvd = (1 << 10), /* reserved, according to RealTek engineers, | |
245 | but hardware likes to raise it */ | |
246 | ||
247 | IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty | | |
248 | RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK | | |
249 | RxErr | RxOK | IntrResvd, | |
250 | ||
251 | /* C mode command register */ | |
252 | CmdReset = (1 << 4), /* Enable to reset; self-clearing */ | |
253 | RxOn = (1 << 3), /* Rx mode enable */ | |
254 | TxOn = (1 << 2), /* Tx mode enable */ | |
255 | ||
256 | /* C+ mode command register */ | |
257 | RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */ | |
258 | RxChkSum = (1 << 5), /* Rx checksum offload enable */ | |
259 | PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */ | |
260 | PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */ | |
261 | CpRxOn = (1 << 1), /* Rx mode enable */ | |
262 | CpTxOn = (1 << 0), /* Tx mode enable */ | |
263 | ||
264 | /* Cfg9436 EEPROM control register */ | |
265 | Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */ | |
266 | Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */ | |
267 | ||
268 | /* TxConfig register */ | |
269 | IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */ | |
270 | TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ | |
271 | ||
272 | /* Early Tx Threshold register */ | |
273 | TxThreshMask = 0x3f, /* Mask bits 5-0 */ | |
274 | TxThreshMax = 2048, /* Max early Tx threshold */ | |
275 | ||
276 | /* Config1 register */ | |
277 | DriverLoaded = (1 << 5), /* Software marker, driver is loaded */ | |
278 | LWACT = (1 << 4), /* LWAKE active mode */ | |
279 | PMEnable = (1 << 0), /* Enable various PM features of chip */ | |
280 | ||
281 | /* Config3 register */ | |
282 | PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */ | |
283 | MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ | |
284 | LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ | |
285 | ||
286 | /* Config4 register */ | |
287 | LWPTN = (1 << 1), /* LWAKE Pattern */ | |
288 | LWPME = (1 << 4), /* LANWAKE vs PMEB */ | |
289 | ||
290 | /* Config5 register */ | |
291 | BWF = (1 << 6), /* Accept Broadcast wakeup frame */ | |
292 | MWF = (1 << 5), /* Accept Multicast wakeup frame */ | |
293 | UWF = (1 << 4), /* Accept Unicast wakeup frame */ | |
294 | LANWake = (1 << 1), /* Enable LANWake signal */ | |
295 | PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ | |
296 | ||
297 | cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty, | |
298 | cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr, | |
299 | cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask, | |
300 | }; | |
301 | ||
302 | static const unsigned int cp_rx_config = | |
303 | (RX_FIFO_THRESH << RxCfgFIFOShift) | | |
304 | (RX_DMA_BURST << RxCfgDMAShift); | |
305 | ||
306 | struct cp_desc { | |
307 | u32 opts1; | |
308 | u32 opts2; | |
309 | u64 addr; | |
310 | }; | |
311 | ||
312 | struct ring_info { | |
313 | struct sk_buff *skb; | |
314 | dma_addr_t mapping; | |
315 | unsigned frag; | |
316 | }; | |
317 | ||
318 | struct cp_dma_stats { | |
319 | u64 tx_ok; | |
320 | u64 rx_ok; | |
321 | u64 tx_err; | |
322 | u32 rx_err; | |
323 | u16 rx_fifo; | |
324 | u16 frame_align; | |
325 | u32 tx_ok_1col; | |
326 | u32 tx_ok_mcol; | |
327 | u64 rx_ok_phys; | |
328 | u64 rx_ok_bcast; | |
329 | u32 rx_ok_mcast; | |
330 | u16 tx_abort; | |
331 | u16 tx_underrun; | |
332 | } __attribute__((packed)); | |
333 | ||
334 | struct cp_extra_stats { | |
335 | unsigned long rx_frags; | |
336 | }; | |
337 | ||
338 | struct cp_private { | |
339 | void __iomem *regs; | |
340 | struct net_device *dev; | |
341 | spinlock_t lock; | |
342 | u32 msg_enable; | |
343 | ||
344 | struct pci_dev *pdev; | |
345 | u32 rx_config; | |
346 | u16 cpcmd; | |
347 | ||
348 | struct net_device_stats net_stats; | |
349 | struct cp_extra_stats cp_stats; | |
350 | struct cp_dma_stats *nic_stats; | |
351 | dma_addr_t nic_stats_dma; | |
352 | ||
353 | unsigned rx_tail ____cacheline_aligned; | |
354 | struct cp_desc *rx_ring; | |
355 | struct ring_info rx_skb[CP_RX_RING_SIZE]; | |
356 | unsigned rx_buf_sz; | |
357 | ||
358 | unsigned tx_head ____cacheline_aligned; | |
359 | unsigned tx_tail; | |
360 | ||
361 | struct cp_desc *tx_ring; | |
362 | struct ring_info tx_skb[CP_TX_RING_SIZE]; | |
363 | dma_addr_t ring_dma; | |
364 | ||
365 | #if CP_VLAN_TAG_USED | |
366 | struct vlan_group *vlgrp; | |
367 | #endif | |
368 | ||
369 | unsigned int wol_enabled : 1; /* Is Wake-on-LAN enabled? */ | |
370 | ||
371 | struct mii_if_info mii_if; | |
372 | }; | |
373 | ||
374 | #define cpr8(reg) readb(cp->regs + (reg)) | |
375 | #define cpr16(reg) readw(cp->regs + (reg)) | |
376 | #define cpr32(reg) readl(cp->regs + (reg)) | |
377 | #define cpw8(reg,val) writeb((val), cp->regs + (reg)) | |
378 | #define cpw16(reg,val) writew((val), cp->regs + (reg)) | |
379 | #define cpw32(reg,val) writel((val), cp->regs + (reg)) | |
380 | #define cpw8_f(reg,val) do { \ | |
381 | writeb((val), cp->regs + (reg)); \ | |
382 | readb(cp->regs + (reg)); \ | |
383 | } while (0) | |
384 | #define cpw16_f(reg,val) do { \ | |
385 | writew((val), cp->regs + (reg)); \ | |
386 | readw(cp->regs + (reg)); \ | |
387 | } while (0) | |
388 | #define cpw32_f(reg,val) do { \ | |
389 | writel((val), cp->regs + (reg)); \ | |
390 | readl(cp->regs + (reg)); \ | |
391 | } while (0) | |
392 | ||
393 | ||
394 | static void __cp_set_rx_mode (struct net_device *dev); | |
395 | static void cp_tx (struct cp_private *cp); | |
396 | static void cp_clean_rings (struct cp_private *cp); | |
397 | ||
398 | static struct pci_device_id cp_pci_tbl[] = { | |
399 | { PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139, | |
400 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, | |
401 | { PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322, | |
402 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, | |
403 | { }, | |
404 | }; | |
405 | MODULE_DEVICE_TABLE(pci, cp_pci_tbl); | |
406 | ||
407 | static struct { | |
408 | const char str[ETH_GSTRING_LEN]; | |
409 | } ethtool_stats_keys[] = { | |
410 | { "tx_ok" }, | |
411 | { "rx_ok" }, | |
412 | { "tx_err" }, | |
413 | { "rx_err" }, | |
414 | { "rx_fifo" }, | |
415 | { "frame_align" }, | |
416 | { "tx_ok_1col" }, | |
417 | { "tx_ok_mcol" }, | |
418 | { "rx_ok_phys" }, | |
419 | { "rx_ok_bcast" }, | |
420 | { "rx_ok_mcast" }, | |
421 | { "tx_abort" }, | |
422 | { "tx_underrun" }, | |
423 | { "rx_frags" }, | |
424 | }; | |
425 | ||
426 | ||
427 | #if CP_VLAN_TAG_USED | |
428 | static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | |
429 | { | |
430 | struct cp_private *cp = netdev_priv(dev); | |
431 | unsigned long flags; | |
432 | ||
433 | spin_lock_irqsave(&cp->lock, flags); | |
434 | cp->vlgrp = grp; | |
435 | cp->cpcmd |= RxVlanOn; | |
436 | cpw16(CpCmd, cp->cpcmd); | |
437 | spin_unlock_irqrestore(&cp->lock, flags); | |
438 | } | |
439 | ||
440 | static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |
441 | { | |
442 | struct cp_private *cp = netdev_priv(dev); | |
443 | unsigned long flags; | |
444 | ||
445 | spin_lock_irqsave(&cp->lock, flags); | |
446 | cp->cpcmd &= ~RxVlanOn; | |
447 | cpw16(CpCmd, cp->cpcmd); | |
448 | if (cp->vlgrp) | |
449 | cp->vlgrp->vlan_devices[vid] = NULL; | |
450 | spin_unlock_irqrestore(&cp->lock, flags); | |
451 | } | |
452 | #endif /* CP_VLAN_TAG_USED */ | |
453 | ||
454 | static inline void cp_set_rxbufsize (struct cp_private *cp) | |
455 | { | |
456 | unsigned int mtu = cp->dev->mtu; | |
457 | ||
458 | if (mtu > ETH_DATA_LEN) | |
459 | /* MTU + ethernet header + FCS + optional VLAN tag */ | |
460 | cp->rx_buf_sz = mtu + ETH_HLEN + 8; | |
461 | else | |
462 | cp->rx_buf_sz = PKT_BUF_SZ; | |
463 | } | |
464 | ||
465 | static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb, | |
466 | struct cp_desc *desc) | |
467 | { | |
468 | skb->protocol = eth_type_trans (skb, cp->dev); | |
469 | ||
470 | cp->net_stats.rx_packets++; | |
471 | cp->net_stats.rx_bytes += skb->len; | |
472 | cp->dev->last_rx = jiffies; | |
473 | ||
474 | #if CP_VLAN_TAG_USED | |
475 | if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) { | |
476 | vlan_hwaccel_receive_skb(skb, cp->vlgrp, | |
477 | be16_to_cpu(desc->opts2 & 0xffff)); | |
478 | } else | |
479 | #endif | |
480 | netif_receive_skb(skb); | |
481 | } | |
482 | ||
483 | static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail, | |
484 | u32 status, u32 len) | |
485 | { | |
486 | if (netif_msg_rx_err (cp)) | |
487 | printk (KERN_DEBUG | |
488 | "%s: rx err, slot %d status 0x%x len %d\n", | |
489 | cp->dev->name, rx_tail, status, len); | |
490 | cp->net_stats.rx_errors++; | |
491 | if (status & RxErrFrame) | |
492 | cp->net_stats.rx_frame_errors++; | |
493 | if (status & RxErrCRC) | |
494 | cp->net_stats.rx_crc_errors++; | |
495 | if ((status & RxErrRunt) || (status & RxErrLong)) | |
496 | cp->net_stats.rx_length_errors++; | |
497 | if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) | |
498 | cp->net_stats.rx_length_errors++; | |
499 | if (status & RxErrFIFO) | |
500 | cp->net_stats.rx_fifo_errors++; | |
501 | } | |
502 | ||
503 | static inline unsigned int cp_rx_csum_ok (u32 status) | |
504 | { | |
505 | unsigned int protocol = (status >> 16) & 0x3; | |
506 | ||
507 | if (likely((protocol == RxProtoTCP) && (!(status & TCPFail)))) | |
508 | return 1; | |
509 | else if ((protocol == RxProtoUDP) && (!(status & UDPFail))) | |
510 | return 1; | |
511 | else if ((protocol == RxProtoIP) && (!(status & IPFail))) | |
512 | return 1; | |
513 | return 0; | |
514 | } | |
515 | ||
516 | static int cp_rx_poll (struct net_device *dev, int *budget) | |
517 | { | |
518 | struct cp_private *cp = netdev_priv(dev); | |
519 | unsigned rx_tail = cp->rx_tail; | |
520 | unsigned rx_work = dev->quota; | |
521 | unsigned rx; | |
522 | ||
523 | rx_status_loop: | |
524 | rx = 0; | |
525 | cpw16(IntrStatus, cp_rx_intr_mask); | |
526 | ||
527 | while (1) { | |
528 | u32 status, len; | |
529 | dma_addr_t mapping; | |
530 | struct sk_buff *skb, *new_skb; | |
531 | struct cp_desc *desc; | |
532 | unsigned buflen; | |
533 | ||
534 | skb = cp->rx_skb[rx_tail].skb; | |
535 | if (!skb) | |
536 | BUG(); | |
537 | ||
538 | desc = &cp->rx_ring[rx_tail]; | |
539 | status = le32_to_cpu(desc->opts1); | |
540 | if (status & DescOwn) | |
541 | break; | |
542 | ||
543 | len = (status & 0x1fff) - 4; | |
544 | mapping = cp->rx_skb[rx_tail].mapping; | |
545 | ||
546 | if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) { | |
547 | /* we don't support incoming fragmented frames. | |
548 | * instead, we attempt to ensure that the | |
549 | * pre-allocated RX skbs are properly sized such | |
550 | * that RX fragments are never encountered | |
551 | */ | |
552 | cp_rx_err_acct(cp, rx_tail, status, len); | |
553 | cp->net_stats.rx_dropped++; | |
554 | cp->cp_stats.rx_frags++; | |
555 | goto rx_next; | |
556 | } | |
557 | ||
558 | if (status & (RxError | RxErrFIFO)) { | |
559 | cp_rx_err_acct(cp, rx_tail, status, len); | |
560 | goto rx_next; | |
561 | } | |
562 | ||
563 | if (netif_msg_rx_status(cp)) | |
564 | printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n", | |
565 | cp->dev->name, rx_tail, status, len); | |
566 | ||
567 | buflen = cp->rx_buf_sz + RX_OFFSET; | |
568 | new_skb = dev_alloc_skb (buflen); | |
569 | if (!new_skb) { | |
570 | cp->net_stats.rx_dropped++; | |
571 | goto rx_next; | |
572 | } | |
573 | ||
574 | skb_reserve(new_skb, RX_OFFSET); | |
575 | new_skb->dev = cp->dev; | |
576 | ||
577 | pci_unmap_single(cp->pdev, mapping, | |
578 | buflen, PCI_DMA_FROMDEVICE); | |
579 | ||
580 | /* Handle checksum offloading for incoming packets. */ | |
581 | if (cp_rx_csum_ok(status)) | |
582 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
583 | else | |
584 | skb->ip_summed = CHECKSUM_NONE; | |
585 | ||
586 | skb_put(skb, len); | |
587 | ||
588 | mapping = | |
589 | cp->rx_skb[rx_tail].mapping = | |
590 | pci_map_single(cp->pdev, new_skb->tail, | |
591 | buflen, PCI_DMA_FROMDEVICE); | |
592 | cp->rx_skb[rx_tail].skb = new_skb; | |
593 | ||
594 | cp_rx_skb(cp, skb, desc); | |
595 | rx++; | |
596 | ||
597 | rx_next: | |
598 | cp->rx_ring[rx_tail].opts2 = 0; | |
599 | cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping); | |
600 | if (rx_tail == (CP_RX_RING_SIZE - 1)) | |
601 | desc->opts1 = cpu_to_le32(DescOwn | RingEnd | | |
602 | cp->rx_buf_sz); | |
603 | else | |
604 | desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); | |
605 | rx_tail = NEXT_RX(rx_tail); | |
606 | ||
607 | if (!rx_work--) | |
608 | break; | |
609 | } | |
610 | ||
611 | cp->rx_tail = rx_tail; | |
612 | ||
613 | dev->quota -= rx; | |
614 | *budget -= rx; | |
615 | ||
616 | /* if we did not reach work limit, then we're done with | |
617 | * this round of polling | |
618 | */ | |
619 | if (rx_work) { | |
620 | if (cpr16(IntrStatus) & cp_rx_intr_mask) | |
621 | goto rx_status_loop; | |
622 | ||
623 | local_irq_disable(); | |
624 | cpw16_f(IntrMask, cp_intr_mask); | |
625 | __netif_rx_complete(dev); | |
626 | local_irq_enable(); | |
627 | ||
628 | return 0; /* done */ | |
629 | } | |
630 | ||
631 | return 1; /* not done */ | |
632 | } | |
633 | ||
634 | static irqreturn_t | |
635 | cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs) | |
636 | { | |
637 | struct net_device *dev = dev_instance; | |
638 | struct cp_private *cp; | |
639 | u16 status; | |
640 | ||
641 | if (unlikely(dev == NULL)) | |
642 | return IRQ_NONE; | |
643 | cp = netdev_priv(dev); | |
644 | ||
645 | status = cpr16(IntrStatus); | |
646 | if (!status || (status == 0xFFFF)) | |
647 | return IRQ_NONE; | |
648 | ||
649 | if (netif_msg_intr(cp)) | |
650 | printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n", | |
651 | dev->name, status, cpr8(Cmd), cpr16(CpCmd)); | |
652 | ||
653 | cpw16(IntrStatus, status & ~cp_rx_intr_mask); | |
654 | ||
655 | spin_lock(&cp->lock); | |
656 | ||
657 | /* close possible race's with dev_close */ | |
658 | if (unlikely(!netif_running(dev))) { | |
659 | cpw16(IntrMask, 0); | |
660 | spin_unlock(&cp->lock); | |
661 | return IRQ_HANDLED; | |
662 | } | |
663 | ||
664 | if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) | |
665 | if (netif_rx_schedule_prep(dev)) { | |
666 | cpw16_f(IntrMask, cp_norx_intr_mask); | |
667 | __netif_rx_schedule(dev); | |
668 | } | |
669 | ||
670 | if (status & (TxOK | TxErr | TxEmpty | SWInt)) | |
671 | cp_tx(cp); | |
672 | if (status & LinkChg) | |
673 | mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE); | |
674 | ||
675 | spin_unlock(&cp->lock); | |
676 | ||
677 | if (status & PciErr) { | |
678 | u16 pci_status; | |
679 | ||
680 | pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status); | |
681 | pci_write_config_word(cp->pdev, PCI_STATUS, pci_status); | |
682 | printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n", | |
683 | dev->name, status, pci_status); | |
684 | ||
685 | /* TODO: reset hardware */ | |
686 | } | |
687 | ||
688 | return IRQ_HANDLED; | |
689 | } | |
690 | ||
691 | static void cp_tx (struct cp_private *cp) | |
692 | { | |
693 | unsigned tx_head = cp->tx_head; | |
694 | unsigned tx_tail = cp->tx_tail; | |
695 | ||
696 | while (tx_tail != tx_head) { | |
697 | struct sk_buff *skb; | |
698 | u32 status; | |
699 | ||
700 | rmb(); | |
701 | status = le32_to_cpu(cp->tx_ring[tx_tail].opts1); | |
702 | if (status & DescOwn) | |
703 | break; | |
704 | ||
705 | skb = cp->tx_skb[tx_tail].skb; | |
706 | if (!skb) | |
707 | BUG(); | |
708 | ||
709 | pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping, | |
710 | skb->len, PCI_DMA_TODEVICE); | |
711 | ||
712 | if (status & LastFrag) { | |
713 | if (status & (TxError | TxFIFOUnder)) { | |
714 | if (netif_msg_tx_err(cp)) | |
715 | printk(KERN_DEBUG "%s: tx err, status 0x%x\n", | |
716 | cp->dev->name, status); | |
717 | cp->net_stats.tx_errors++; | |
718 | if (status & TxOWC) | |
719 | cp->net_stats.tx_window_errors++; | |
720 | if (status & TxMaxCol) | |
721 | cp->net_stats.tx_aborted_errors++; | |
722 | if (status & TxLinkFail) | |
723 | cp->net_stats.tx_carrier_errors++; | |
724 | if (status & TxFIFOUnder) | |
725 | cp->net_stats.tx_fifo_errors++; | |
726 | } else { | |
727 | cp->net_stats.collisions += | |
728 | ((status >> TxColCntShift) & TxColCntMask); | |
729 | cp->net_stats.tx_packets++; | |
730 | cp->net_stats.tx_bytes += skb->len; | |
731 | if (netif_msg_tx_done(cp)) | |
732 | printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail); | |
733 | } | |
734 | dev_kfree_skb_irq(skb); | |
735 | } | |
736 | ||
737 | cp->tx_skb[tx_tail].skb = NULL; | |
738 | ||
739 | tx_tail = NEXT_TX(tx_tail); | |
740 | } | |
741 | ||
742 | cp->tx_tail = tx_tail; | |
743 | ||
744 | if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)) | |
745 | netif_wake_queue(cp->dev); | |
746 | } | |
747 | ||
748 | static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) | |
749 | { | |
750 | struct cp_private *cp = netdev_priv(dev); | |
751 | unsigned entry; | |
752 | u32 eor; | |
753 | #if CP_VLAN_TAG_USED | |
754 | u32 vlan_tag = 0; | |
755 | #endif | |
756 | ||
757 | spin_lock_irq(&cp->lock); | |
758 | ||
759 | /* This is a hard error, log it. */ | |
760 | if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) { | |
761 | netif_stop_queue(dev); | |
762 | spin_unlock_irq(&cp->lock); | |
763 | printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", | |
764 | dev->name); | |
765 | return 1; | |
766 | } | |
767 | ||
768 | #if CP_VLAN_TAG_USED | |
769 | if (cp->vlgrp && vlan_tx_tag_present(skb)) | |
770 | vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb)); | |
771 | #endif | |
772 | ||
773 | entry = cp->tx_head; | |
774 | eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; | |
775 | if (skb_shinfo(skb)->nr_frags == 0) { | |
776 | struct cp_desc *txd = &cp->tx_ring[entry]; | |
777 | u32 len; | |
778 | dma_addr_t mapping; | |
779 | ||
780 | len = skb->len; | |
781 | mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE); | |
782 | CP_VLAN_TX_TAG(txd, vlan_tag); | |
783 | txd->addr = cpu_to_le64(mapping); | |
784 | wmb(); | |
785 | ||
786 | if (skb->ip_summed == CHECKSUM_HW) { | |
787 | const struct iphdr *ip = skb->nh.iph; | |
788 | if (ip->protocol == IPPROTO_TCP) | |
789 | txd->opts1 = cpu_to_le32(eor | len | DescOwn | | |
790 | FirstFrag | LastFrag | | |
791 | IPCS | TCPCS); | |
792 | else if (ip->protocol == IPPROTO_UDP) | |
793 | txd->opts1 = cpu_to_le32(eor | len | DescOwn | | |
794 | FirstFrag | LastFrag | | |
795 | IPCS | UDPCS); | |
796 | else | |
797 | BUG(); | |
798 | } else | |
799 | txd->opts1 = cpu_to_le32(eor | len | DescOwn | | |
800 | FirstFrag | LastFrag); | |
801 | wmb(); | |
802 | ||
803 | cp->tx_skb[entry].skb = skb; | |
804 | cp->tx_skb[entry].mapping = mapping; | |
805 | cp->tx_skb[entry].frag = 0; | |
806 | entry = NEXT_TX(entry); | |
807 | } else { | |
808 | struct cp_desc *txd; | |
809 | u32 first_len, first_eor; | |
810 | dma_addr_t first_mapping; | |
811 | int frag, first_entry = entry; | |
812 | const struct iphdr *ip = skb->nh.iph; | |
813 | ||
814 | /* We must give this initial chunk to the device last. | |
815 | * Otherwise we could race with the device. | |
816 | */ | |
817 | first_eor = eor; | |
818 | first_len = skb_headlen(skb); | |
819 | first_mapping = pci_map_single(cp->pdev, skb->data, | |
820 | first_len, PCI_DMA_TODEVICE); | |
821 | cp->tx_skb[entry].skb = skb; | |
822 | cp->tx_skb[entry].mapping = first_mapping; | |
823 | cp->tx_skb[entry].frag = 1; | |
824 | entry = NEXT_TX(entry); | |
825 | ||
826 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | |
827 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; | |
828 | u32 len; | |
829 | u32 ctrl; | |
830 | dma_addr_t mapping; | |
831 | ||
832 | len = this_frag->size; | |
833 | mapping = pci_map_single(cp->pdev, | |
834 | ((void *) page_address(this_frag->page) + | |
835 | this_frag->page_offset), | |
836 | len, PCI_DMA_TODEVICE); | |
837 | eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; | |
838 | ||
839 | if (skb->ip_summed == CHECKSUM_HW) { | |
840 | ctrl = eor | len | DescOwn | IPCS; | |
841 | if (ip->protocol == IPPROTO_TCP) | |
842 | ctrl |= TCPCS; | |
843 | else if (ip->protocol == IPPROTO_UDP) | |
844 | ctrl |= UDPCS; | |
845 | else | |
846 | BUG(); | |
847 | } else | |
848 | ctrl = eor | len | DescOwn; | |
849 | ||
850 | if (frag == skb_shinfo(skb)->nr_frags - 1) | |
851 | ctrl |= LastFrag; | |
852 | ||
853 | txd = &cp->tx_ring[entry]; | |
854 | CP_VLAN_TX_TAG(txd, vlan_tag); | |
855 | txd->addr = cpu_to_le64(mapping); | |
856 | wmb(); | |
857 | ||
858 | txd->opts1 = cpu_to_le32(ctrl); | |
859 | wmb(); | |
860 | ||
861 | cp->tx_skb[entry].skb = skb; | |
862 | cp->tx_skb[entry].mapping = mapping; | |
863 | cp->tx_skb[entry].frag = frag + 2; | |
864 | entry = NEXT_TX(entry); | |
865 | } | |
866 | ||
867 | txd = &cp->tx_ring[first_entry]; | |
868 | CP_VLAN_TX_TAG(txd, vlan_tag); | |
869 | txd->addr = cpu_to_le64(first_mapping); | |
870 | wmb(); | |
871 | ||
872 | if (skb->ip_summed == CHECKSUM_HW) { | |
873 | if (ip->protocol == IPPROTO_TCP) | |
874 | txd->opts1 = cpu_to_le32(first_eor | first_len | | |
875 | FirstFrag | DescOwn | | |
876 | IPCS | TCPCS); | |
877 | else if (ip->protocol == IPPROTO_UDP) | |
878 | txd->opts1 = cpu_to_le32(first_eor | first_len | | |
879 | FirstFrag | DescOwn | | |
880 | IPCS | UDPCS); | |
881 | else | |
882 | BUG(); | |
883 | } else | |
884 | txd->opts1 = cpu_to_le32(first_eor | first_len | | |
885 | FirstFrag | DescOwn); | |
886 | wmb(); | |
887 | } | |
888 | cp->tx_head = entry; | |
889 | if (netif_msg_tx_queued(cp)) | |
890 | printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", | |
891 | dev->name, entry, skb->len); | |
892 | if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) | |
893 | netif_stop_queue(dev); | |
894 | ||
895 | spin_unlock_irq(&cp->lock); | |
896 | ||
897 | cpw8(TxPoll, NormalTxPoll); | |
898 | dev->trans_start = jiffies; | |
899 | ||
900 | return 0; | |
901 | } | |
902 | ||
903 | /* Set or clear the multicast filter for this adaptor. | |
904 | This routine is not state sensitive and need not be SMP locked. */ | |
905 | ||
906 | static void __cp_set_rx_mode (struct net_device *dev) | |
907 | { | |
908 | struct cp_private *cp = netdev_priv(dev); | |
909 | u32 mc_filter[2]; /* Multicast hash filter */ | |
910 | int i, rx_mode; | |
911 | u32 tmp; | |
912 | ||
913 | /* Note: do not reorder, GCC is clever about common statements. */ | |
914 | if (dev->flags & IFF_PROMISC) { | |
915 | /* Unconditionally log net taps. */ | |
916 | printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n", | |
917 | dev->name); | |
918 | rx_mode = | |
919 | AcceptBroadcast | AcceptMulticast | AcceptMyPhys | | |
920 | AcceptAllPhys; | |
921 | mc_filter[1] = mc_filter[0] = 0xffffffff; | |
922 | } else if ((dev->mc_count > multicast_filter_limit) | |
923 | || (dev->flags & IFF_ALLMULTI)) { | |
924 | /* Too many to filter perfectly -- accept all multicasts. */ | |
925 | rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; | |
926 | mc_filter[1] = mc_filter[0] = 0xffffffff; | |
927 | } else { | |
928 | struct dev_mc_list *mclist; | |
929 | rx_mode = AcceptBroadcast | AcceptMyPhys; | |
930 | mc_filter[1] = mc_filter[0] = 0; | |
931 | for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; | |
932 | i++, mclist = mclist->next) { | |
933 | int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; | |
934 | ||
935 | mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); | |
936 | rx_mode |= AcceptMulticast; | |
937 | } | |
938 | } | |
939 | ||
940 | /* We can safely update without stopping the chip. */ | |
941 | tmp = cp_rx_config | rx_mode; | |
942 | if (cp->rx_config != tmp) { | |
943 | cpw32_f (RxConfig, tmp); | |
944 | cp->rx_config = tmp; | |
945 | } | |
946 | cpw32_f (MAR0 + 0, mc_filter[0]); | |
947 | cpw32_f (MAR0 + 4, mc_filter[1]); | |
948 | } | |
949 | ||
950 | static void cp_set_rx_mode (struct net_device *dev) | |
951 | { | |
952 | unsigned long flags; | |
953 | struct cp_private *cp = netdev_priv(dev); | |
954 | ||
955 | spin_lock_irqsave (&cp->lock, flags); | |
956 | __cp_set_rx_mode(dev); | |
957 | spin_unlock_irqrestore (&cp->lock, flags); | |
958 | } | |
959 | ||
960 | static void __cp_get_stats(struct cp_private *cp) | |
961 | { | |
962 | /* only lower 24 bits valid; write any value to clear */ | |
963 | cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff); | |
964 | cpw32 (RxMissed, 0); | |
965 | } | |
966 | ||
967 | static struct net_device_stats *cp_get_stats(struct net_device *dev) | |
968 | { | |
969 | struct cp_private *cp = netdev_priv(dev); | |
970 | unsigned long flags; | |
971 | ||
972 | /* The chip only need report frame silently dropped. */ | |
973 | spin_lock_irqsave(&cp->lock, flags); | |
974 | if (netif_running(dev) && netif_device_present(dev)) | |
975 | __cp_get_stats(cp); | |
976 | spin_unlock_irqrestore(&cp->lock, flags); | |
977 | ||
978 | return &cp->net_stats; | |
979 | } | |
980 | ||
981 | static void cp_stop_hw (struct cp_private *cp) | |
982 | { | |
983 | cpw16(IntrStatus, ~(cpr16(IntrStatus))); | |
984 | cpw16_f(IntrMask, 0); | |
985 | cpw8(Cmd, 0); | |
986 | cpw16_f(CpCmd, 0); | |
987 | cpw16_f(IntrStatus, ~(cpr16(IntrStatus))); | |
988 | ||
989 | cp->rx_tail = 0; | |
990 | cp->tx_head = cp->tx_tail = 0; | |
991 | } | |
992 | ||
993 | static void cp_reset_hw (struct cp_private *cp) | |
994 | { | |
995 | unsigned work = 1000; | |
996 | ||
997 | cpw8(Cmd, CmdReset); | |
998 | ||
999 | while (work--) { | |
1000 | if (!(cpr8(Cmd) & CmdReset)) | |
1001 | return; | |
1002 | ||
1003 | set_current_state(TASK_UNINTERRUPTIBLE); | |
1004 | schedule_timeout(10); | |
1005 | } | |
1006 | ||
1007 | printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name); | |
1008 | } | |
1009 | ||
1010 | static inline void cp_start_hw (struct cp_private *cp) | |
1011 | { | |
1012 | cpw16(CpCmd, cp->cpcmd); | |
1013 | cpw8(Cmd, RxOn | TxOn); | |
1014 | } | |
1015 | ||
1016 | static void cp_init_hw (struct cp_private *cp) | |
1017 | { | |
1018 | struct net_device *dev = cp->dev; | |
1019 | dma_addr_t ring_dma; | |
1020 | ||
1021 | cp_reset_hw(cp); | |
1022 | ||
1023 | cpw8_f (Cfg9346, Cfg9346_Unlock); | |
1024 | ||
1025 | /* Restore our idea of the MAC address. */ | |
1026 | cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0))); | |
1027 | cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4))); | |
1028 | ||
1029 | cp_start_hw(cp); | |
1030 | cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ | |
1031 | ||
1032 | __cp_set_rx_mode(dev); | |
1033 | cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift)); | |
1034 | ||
1035 | cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable); | |
1036 | /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */ | |
1037 | cpw8(Config3, PARMEnable); | |
1038 | cp->wol_enabled = 0; | |
1039 | ||
1040 | cpw8(Config5, cpr8(Config5) & PMEStatus); | |
1041 | ||
1042 | cpw32_f(HiTxRingAddr, 0); | |
1043 | cpw32_f(HiTxRingAddr + 4, 0); | |
1044 | ||
1045 | ring_dma = cp->ring_dma; | |
1046 | cpw32_f(RxRingAddr, ring_dma & 0xffffffff); | |
1047 | cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); | |
1048 | ||
1049 | ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; | |
1050 | cpw32_f(TxRingAddr, ring_dma & 0xffffffff); | |
1051 | cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); | |
1052 | ||
1053 | cpw16(MultiIntr, 0); | |
1054 | ||
1055 | cpw16_f(IntrMask, cp_intr_mask); | |
1056 | ||
1057 | cpw8_f(Cfg9346, Cfg9346_Lock); | |
1058 | } | |
1059 | ||
1060 | static int cp_refill_rx (struct cp_private *cp) | |
1061 | { | |
1062 | unsigned i; | |
1063 | ||
1064 | for (i = 0; i < CP_RX_RING_SIZE; i++) { | |
1065 | struct sk_buff *skb; | |
1066 | ||
1067 | skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET); | |
1068 | if (!skb) | |
1069 | goto err_out; | |
1070 | ||
1071 | skb->dev = cp->dev; | |
1072 | skb_reserve(skb, RX_OFFSET); | |
1073 | ||
1074 | cp->rx_skb[i].mapping = pci_map_single(cp->pdev, | |
1075 | skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE); | |
1076 | cp->rx_skb[i].skb = skb; | |
1077 | cp->rx_skb[i].frag = 0; | |
1078 | ||
1079 | cp->rx_ring[i].opts2 = 0; | |
1080 | cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping); | |
1081 | if (i == (CP_RX_RING_SIZE - 1)) | |
1082 | cp->rx_ring[i].opts1 = | |
1083 | cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); | |
1084 | else | |
1085 | cp->rx_ring[i].opts1 = | |
1086 | cpu_to_le32(DescOwn | cp->rx_buf_sz); | |
1087 | } | |
1088 | ||
1089 | return 0; | |
1090 | ||
1091 | err_out: | |
1092 | cp_clean_rings(cp); | |
1093 | return -ENOMEM; | |
1094 | } | |
1095 | ||
1096 | static int cp_init_rings (struct cp_private *cp) | |
1097 | { | |
1098 | memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); | |
1099 | cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); | |
1100 | ||
1101 | cp->rx_tail = 0; | |
1102 | cp->tx_head = cp->tx_tail = 0; | |
1103 | ||
1104 | return cp_refill_rx (cp); | |
1105 | } | |
1106 | ||
1107 | static int cp_alloc_rings (struct cp_private *cp) | |
1108 | { | |
1109 | void *mem; | |
1110 | ||
1111 | mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma); | |
1112 | if (!mem) | |
1113 | return -ENOMEM; | |
1114 | ||
1115 | cp->rx_ring = mem; | |
1116 | cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; | |
1117 | ||
1118 | mem += (CP_RING_BYTES - CP_STATS_SIZE); | |
1119 | cp->nic_stats = mem; | |
1120 | cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE); | |
1121 | ||
1122 | return cp_init_rings(cp); | |
1123 | } | |
1124 | ||
1125 | static void cp_clean_rings (struct cp_private *cp) | |
1126 | { | |
1127 | unsigned i; | |
1128 | ||
1129 | memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); | |
1130 | memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); | |
1131 | ||
1132 | for (i = 0; i < CP_RX_RING_SIZE; i++) { | |
1133 | if (cp->rx_skb[i].skb) { | |
1134 | pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping, | |
1135 | cp->rx_buf_sz, PCI_DMA_FROMDEVICE); | |
1136 | dev_kfree_skb(cp->rx_skb[i].skb); | |
1137 | } | |
1138 | } | |
1139 | ||
1140 | for (i = 0; i < CP_TX_RING_SIZE; i++) { | |
1141 | if (cp->tx_skb[i].skb) { | |
1142 | struct sk_buff *skb = cp->tx_skb[i].skb; | |
1143 | pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping, | |
1144 | skb->len, PCI_DMA_TODEVICE); | |
1145 | dev_kfree_skb(skb); | |
1146 | cp->net_stats.tx_dropped++; | |
1147 | } | |
1148 | } | |
1149 | ||
1150 | memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE); | |
1151 | memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE); | |
1152 | } | |
1153 | ||
1154 | static void cp_free_rings (struct cp_private *cp) | |
1155 | { | |
1156 | cp_clean_rings(cp); | |
1157 | pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); | |
1158 | cp->rx_ring = NULL; | |
1159 | cp->tx_ring = NULL; | |
1160 | cp->nic_stats = NULL; | |
1161 | } | |
1162 | ||
1163 | static int cp_open (struct net_device *dev) | |
1164 | { | |
1165 | struct cp_private *cp = netdev_priv(dev); | |
1166 | int rc; | |
1167 | ||
1168 | if (netif_msg_ifup(cp)) | |
1169 | printk(KERN_DEBUG "%s: enabling interface\n", dev->name); | |
1170 | ||
1171 | rc = cp_alloc_rings(cp); | |
1172 | if (rc) | |
1173 | return rc; | |
1174 | ||
1175 | cp_init_hw(cp); | |
1176 | ||
1177 | rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ, dev->name, dev); | |
1178 | if (rc) | |
1179 | goto err_out_hw; | |
1180 | ||
1181 | netif_carrier_off(dev); | |
1182 | mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE); | |
1183 | netif_start_queue(dev); | |
1184 | ||
1185 | return 0; | |
1186 | ||
1187 | err_out_hw: | |
1188 | cp_stop_hw(cp); | |
1189 | cp_free_rings(cp); | |
1190 | return rc; | |
1191 | } | |
1192 | ||
1193 | static int cp_close (struct net_device *dev) | |
1194 | { | |
1195 | struct cp_private *cp = netdev_priv(dev); | |
1196 | unsigned long flags; | |
1197 | ||
1198 | if (netif_msg_ifdown(cp)) | |
1199 | printk(KERN_DEBUG "%s: disabling interface\n", dev->name); | |
1200 | ||
1201 | spin_lock_irqsave(&cp->lock, flags); | |
1202 | ||
1203 | netif_stop_queue(dev); | |
1204 | netif_carrier_off(dev); | |
1205 | ||
1206 | cp_stop_hw(cp); | |
1207 | ||
1208 | spin_unlock_irqrestore(&cp->lock, flags); | |
1209 | ||
1210 | synchronize_irq(dev->irq); | |
1211 | free_irq(dev->irq, dev); | |
1212 | ||
1213 | cp_free_rings(cp); | |
1214 | return 0; | |
1215 | } | |
1216 | ||
1217 | #ifdef BROKEN | |
1218 | static int cp_change_mtu(struct net_device *dev, int new_mtu) | |
1219 | { | |
1220 | struct cp_private *cp = netdev_priv(dev); | |
1221 | int rc; | |
1222 | unsigned long flags; | |
1223 | ||
1224 | /* check for invalid MTU, according to hardware limits */ | |
1225 | if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU) | |
1226 | return -EINVAL; | |
1227 | ||
1228 | /* if network interface not up, no need for complexity */ | |
1229 | if (!netif_running(dev)) { | |
1230 | dev->mtu = new_mtu; | |
1231 | cp_set_rxbufsize(cp); /* set new rx buf size */ | |
1232 | return 0; | |
1233 | } | |
1234 | ||
1235 | spin_lock_irqsave(&cp->lock, flags); | |
1236 | ||
1237 | cp_stop_hw(cp); /* stop h/w and free rings */ | |
1238 | cp_clean_rings(cp); | |
1239 | ||
1240 | dev->mtu = new_mtu; | |
1241 | cp_set_rxbufsize(cp); /* set new rx buf size */ | |
1242 | ||
1243 | rc = cp_init_rings(cp); /* realloc and restart h/w */ | |
1244 | cp_start_hw(cp); | |
1245 | ||
1246 | spin_unlock_irqrestore(&cp->lock, flags); | |
1247 | ||
1248 | return rc; | |
1249 | } | |
1250 | #endif /* BROKEN */ | |
1251 | ||
1252 | static char mii_2_8139_map[8] = { | |
1253 | BasicModeCtrl, | |
1254 | BasicModeStatus, | |
1255 | 0, | |
1256 | 0, | |
1257 | NWayAdvert, | |
1258 | NWayLPAR, | |
1259 | NWayExpansion, | |
1260 | 0 | |
1261 | }; | |
1262 | ||
1263 | static int mdio_read(struct net_device *dev, int phy_id, int location) | |
1264 | { | |
1265 | struct cp_private *cp = netdev_priv(dev); | |
1266 | ||
1267 | return location < 8 && mii_2_8139_map[location] ? | |
1268 | readw(cp->regs + mii_2_8139_map[location]) : 0; | |
1269 | } | |
1270 | ||
1271 | ||
1272 | static void mdio_write(struct net_device *dev, int phy_id, int location, | |
1273 | int value) | |
1274 | { | |
1275 | struct cp_private *cp = netdev_priv(dev); | |
1276 | ||
1277 | if (location == 0) { | |
1278 | cpw8(Cfg9346, Cfg9346_Unlock); | |
1279 | cpw16(BasicModeCtrl, value); | |
1280 | cpw8(Cfg9346, Cfg9346_Lock); | |
1281 | } else if (location < 8 && mii_2_8139_map[location]) | |
1282 | cpw16(mii_2_8139_map[location], value); | |
1283 | } | |
1284 | ||
1285 | /* Set the ethtool Wake-on-LAN settings */ | |
1286 | static int netdev_set_wol (struct cp_private *cp, | |
1287 | const struct ethtool_wolinfo *wol) | |
1288 | { | |
1289 | u8 options; | |
1290 | ||
1291 | options = cpr8 (Config3) & ~(LinkUp | MagicPacket); | |
1292 | /* If WOL is being disabled, no need for complexity */ | |
1293 | if (wol->wolopts) { | |
1294 | if (wol->wolopts & WAKE_PHY) options |= LinkUp; | |
1295 | if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket; | |
1296 | } | |
1297 | ||
1298 | cpw8 (Cfg9346, Cfg9346_Unlock); | |
1299 | cpw8 (Config3, options); | |
1300 | cpw8 (Cfg9346, Cfg9346_Lock); | |
1301 | ||
1302 | options = 0; /* Paranoia setting */ | |
1303 | options = cpr8 (Config5) & ~(UWF | MWF | BWF); | |
1304 | /* If WOL is being disabled, no need for complexity */ | |
1305 | if (wol->wolopts) { | |
1306 | if (wol->wolopts & WAKE_UCAST) options |= UWF; | |
1307 | if (wol->wolopts & WAKE_BCAST) options |= BWF; | |
1308 | if (wol->wolopts & WAKE_MCAST) options |= MWF; | |
1309 | } | |
1310 | ||
1311 | cpw8 (Config5, options); | |
1312 | ||
1313 | cp->wol_enabled = (wol->wolopts) ? 1 : 0; | |
1314 | ||
1315 | return 0; | |
1316 | } | |
1317 | ||
1318 | /* Get the ethtool Wake-on-LAN settings */ | |
1319 | static void netdev_get_wol (struct cp_private *cp, | |
1320 | struct ethtool_wolinfo *wol) | |
1321 | { | |
1322 | u8 options; | |
1323 | ||
1324 | wol->wolopts = 0; /* Start from scratch */ | |
1325 | wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC | | |
1326 | WAKE_MCAST | WAKE_UCAST; | |
1327 | /* We don't need to go on if WOL is disabled */ | |
1328 | if (!cp->wol_enabled) return; | |
1329 | ||
1330 | options = cpr8 (Config3); | |
1331 | if (options & LinkUp) wol->wolopts |= WAKE_PHY; | |
1332 | if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC; | |
1333 | ||
1334 | options = 0; /* Paranoia setting */ | |
1335 | options = cpr8 (Config5); | |
1336 | if (options & UWF) wol->wolopts |= WAKE_UCAST; | |
1337 | if (options & BWF) wol->wolopts |= WAKE_BCAST; | |
1338 | if (options & MWF) wol->wolopts |= WAKE_MCAST; | |
1339 | } | |
1340 | ||
1341 | static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) | |
1342 | { | |
1343 | struct cp_private *cp = netdev_priv(dev); | |
1344 | ||
1345 | strcpy (info->driver, DRV_NAME); | |
1346 | strcpy (info->version, DRV_VERSION); | |
1347 | strcpy (info->bus_info, pci_name(cp->pdev)); | |
1348 | } | |
1349 | ||
1350 | static int cp_get_regs_len(struct net_device *dev) | |
1351 | { | |
1352 | return CP_REGS_SIZE; | |
1353 | } | |
1354 | ||
1355 | static int cp_get_stats_count (struct net_device *dev) | |
1356 | { | |
1357 | return CP_NUM_STATS; | |
1358 | } | |
1359 | ||
1360 | static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
1361 | { | |
1362 | struct cp_private *cp = netdev_priv(dev); | |
1363 | int rc; | |
1364 | unsigned long flags; | |
1365 | ||
1366 | spin_lock_irqsave(&cp->lock, flags); | |
1367 | rc = mii_ethtool_gset(&cp->mii_if, cmd); | |
1368 | spin_unlock_irqrestore(&cp->lock, flags); | |
1369 | ||
1370 | return rc; | |
1371 | } | |
1372 | ||
1373 | static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
1374 | { | |
1375 | struct cp_private *cp = netdev_priv(dev); | |
1376 | int rc; | |
1377 | unsigned long flags; | |
1378 | ||
1379 | spin_lock_irqsave(&cp->lock, flags); | |
1380 | rc = mii_ethtool_sset(&cp->mii_if, cmd); | |
1381 | spin_unlock_irqrestore(&cp->lock, flags); | |
1382 | ||
1383 | return rc; | |
1384 | } | |
1385 | ||
1386 | static int cp_nway_reset(struct net_device *dev) | |
1387 | { | |
1388 | struct cp_private *cp = netdev_priv(dev); | |
1389 | return mii_nway_restart(&cp->mii_if); | |
1390 | } | |
1391 | ||
1392 | static u32 cp_get_msglevel(struct net_device *dev) | |
1393 | { | |
1394 | struct cp_private *cp = netdev_priv(dev); | |
1395 | return cp->msg_enable; | |
1396 | } | |
1397 | ||
1398 | static void cp_set_msglevel(struct net_device *dev, u32 value) | |
1399 | { | |
1400 | struct cp_private *cp = netdev_priv(dev); | |
1401 | cp->msg_enable = value; | |
1402 | } | |
1403 | ||
1404 | static u32 cp_get_rx_csum(struct net_device *dev) | |
1405 | { | |
1406 | struct cp_private *cp = netdev_priv(dev); | |
1407 | return (cpr16(CpCmd) & RxChkSum) ? 1 : 0; | |
1408 | } | |
1409 | ||
1410 | static int cp_set_rx_csum(struct net_device *dev, u32 data) | |
1411 | { | |
1412 | struct cp_private *cp = netdev_priv(dev); | |
1413 | u16 cmd = cp->cpcmd, newcmd; | |
1414 | ||
1415 | newcmd = cmd; | |
1416 | ||
1417 | if (data) | |
1418 | newcmd |= RxChkSum; | |
1419 | else | |
1420 | newcmd &= ~RxChkSum; | |
1421 | ||
1422 | if (newcmd != cmd) { | |
1423 | unsigned long flags; | |
1424 | ||
1425 | spin_lock_irqsave(&cp->lock, flags); | |
1426 | cp->cpcmd = newcmd; | |
1427 | cpw16_f(CpCmd, newcmd); | |
1428 | spin_unlock_irqrestore(&cp->lock, flags); | |
1429 | } | |
1430 | ||
1431 | return 0; | |
1432 | } | |
1433 | ||
1434 | static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |
1435 | void *p) | |
1436 | { | |
1437 | struct cp_private *cp = netdev_priv(dev); | |
1438 | unsigned long flags; | |
1439 | ||
1440 | if (regs->len < CP_REGS_SIZE) | |
1441 | return /* -EINVAL */; | |
1442 | ||
1443 | regs->version = CP_REGS_VER; | |
1444 | ||
1445 | spin_lock_irqsave(&cp->lock, flags); | |
1446 | memcpy_fromio(p, cp->regs, CP_REGS_SIZE); | |
1447 | spin_unlock_irqrestore(&cp->lock, flags); | |
1448 | } | |
1449 | ||
1450 | static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol) | |
1451 | { | |
1452 | struct cp_private *cp = netdev_priv(dev); | |
1453 | unsigned long flags; | |
1454 | ||
1455 | spin_lock_irqsave (&cp->lock, flags); | |
1456 | netdev_get_wol (cp, wol); | |
1457 | spin_unlock_irqrestore (&cp->lock, flags); | |
1458 | } | |
1459 | ||
1460 | static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol) | |
1461 | { | |
1462 | struct cp_private *cp = netdev_priv(dev); | |
1463 | unsigned long flags; | |
1464 | int rc; | |
1465 | ||
1466 | spin_lock_irqsave (&cp->lock, flags); | |
1467 | rc = netdev_set_wol (cp, wol); | |
1468 | spin_unlock_irqrestore (&cp->lock, flags); | |
1469 | ||
1470 | return rc; | |
1471 | } | |
1472 | ||
1473 | static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf) | |
1474 | { | |
1475 | switch (stringset) { | |
1476 | case ETH_SS_STATS: | |
1477 | memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys)); | |
1478 | break; | |
1479 | default: | |
1480 | BUG(); | |
1481 | break; | |
1482 | } | |
1483 | } | |
1484 | ||
1485 | static void cp_get_ethtool_stats (struct net_device *dev, | |
1486 | struct ethtool_stats *estats, u64 *tmp_stats) | |
1487 | { | |
1488 | struct cp_private *cp = netdev_priv(dev); | |
1489 | unsigned int work = 100; | |
1490 | int i; | |
1491 | ||
1492 | /* begin NIC statistics dump */ | |
1493 | cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16); | |
1494 | cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats); | |
1495 | cpr32(StatsAddr); | |
1496 | ||
1497 | while (work-- > 0) { | |
1498 | if ((cpr32(StatsAddr) & DumpStats) == 0) | |
1499 | break; | |
1500 | cpu_relax(); | |
1501 | } | |
1502 | ||
1503 | if (cpr32(StatsAddr) & DumpStats) | |
1504 | return /* -EIO */; | |
1505 | ||
1506 | i = 0; | |
1507 | tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok); | |
1508 | tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok); | |
1509 | tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_err); | |
1510 | tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_err); | |
1511 | tmp_stats[i++] = le16_to_cpu(cp->nic_stats->rx_fifo); | |
1512 | tmp_stats[i++] = le16_to_cpu(cp->nic_stats->frame_align); | |
1513 | tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_1col); | |
1514 | tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_mcol); | |
1515 | tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_phys); | |
1516 | tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_bcast); | |
1517 | tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_ok_mcast); | |
1518 | tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_abort); | |
1519 | tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_underrun); | |
1520 | tmp_stats[i++] = cp->cp_stats.rx_frags; | |
1521 | if (i != CP_NUM_STATS) | |
1522 | BUG(); | |
1523 | } | |
1524 | ||
1525 | static struct ethtool_ops cp_ethtool_ops = { | |
1526 | .get_drvinfo = cp_get_drvinfo, | |
1527 | .get_regs_len = cp_get_regs_len, | |
1528 | .get_stats_count = cp_get_stats_count, | |
1529 | .get_settings = cp_get_settings, | |
1530 | .set_settings = cp_set_settings, | |
1531 | .nway_reset = cp_nway_reset, | |
1532 | .get_link = ethtool_op_get_link, | |
1533 | .get_msglevel = cp_get_msglevel, | |
1534 | .set_msglevel = cp_set_msglevel, | |
1535 | .get_rx_csum = cp_get_rx_csum, | |
1536 | .set_rx_csum = cp_set_rx_csum, | |
1537 | .get_tx_csum = ethtool_op_get_tx_csum, | |
1538 | .set_tx_csum = ethtool_op_set_tx_csum, /* local! */ | |
1539 | .get_sg = ethtool_op_get_sg, | |
1540 | .set_sg = ethtool_op_set_sg, | |
1541 | .get_regs = cp_get_regs, | |
1542 | .get_wol = cp_get_wol, | |
1543 | .set_wol = cp_set_wol, | |
1544 | .get_strings = cp_get_strings, | |
1545 | .get_ethtool_stats = cp_get_ethtool_stats, | |
1546 | }; | |
1547 | ||
1548 | static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) | |
1549 | { | |
1550 | struct cp_private *cp = netdev_priv(dev); | |
1551 | int rc; | |
1552 | unsigned long flags; | |
1553 | ||
1554 | if (!netif_running(dev)) | |
1555 | return -EINVAL; | |
1556 | ||
1557 | spin_lock_irqsave(&cp->lock, flags); | |
1558 | rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL); | |
1559 | spin_unlock_irqrestore(&cp->lock, flags); | |
1560 | return rc; | |
1561 | } | |
1562 | ||
1563 | /* Serial EEPROM section. */ | |
1564 | ||
1565 | /* EEPROM_Ctrl bits. */ | |
1566 | #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */ | |
1567 | #define EE_CS 0x08 /* EEPROM chip select. */ | |
1568 | #define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */ | |
1569 | #define EE_WRITE_0 0x00 | |
1570 | #define EE_WRITE_1 0x02 | |
1571 | #define EE_DATA_READ 0x01 /* EEPROM chip data out. */ | |
1572 | #define EE_ENB (0x80 | EE_CS) | |
1573 | ||
1574 | /* Delay between EEPROM clock transitions. | |
1575 | No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. | |
1576 | */ | |
1577 | ||
1578 | #define eeprom_delay() readl(ee_addr) | |
1579 | ||
1580 | /* The EEPROM commands include the alway-set leading bit. */ | |
1581 | #define EE_WRITE_CMD (5) | |
1582 | #define EE_READ_CMD (6) | |
1583 | #define EE_ERASE_CMD (7) | |
1584 | ||
1585 | static int read_eeprom (void __iomem *ioaddr, int location, int addr_len) | |
1586 | { | |
1587 | int i; | |
1588 | unsigned retval = 0; | |
1589 | void __iomem *ee_addr = ioaddr + Cfg9346; | |
1590 | int read_cmd = location | (EE_READ_CMD << addr_len); | |
1591 | ||
1592 | writeb (EE_ENB & ~EE_CS, ee_addr); | |
1593 | writeb (EE_ENB, ee_addr); | |
1594 | eeprom_delay (); | |
1595 | ||
1596 | /* Shift the read command bits out. */ | |
1597 | for (i = 4 + addr_len; i >= 0; i--) { | |
1598 | int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; | |
1599 | writeb (EE_ENB | dataval, ee_addr); | |
1600 | eeprom_delay (); | |
1601 | writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); | |
1602 | eeprom_delay (); | |
1603 | } | |
1604 | writeb (EE_ENB, ee_addr); | |
1605 | eeprom_delay (); | |
1606 | ||
1607 | for (i = 16; i > 0; i--) { | |
1608 | writeb (EE_ENB | EE_SHIFT_CLK, ee_addr); | |
1609 | eeprom_delay (); | |
1610 | retval = | |
1611 | (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 : | |
1612 | 0); | |
1613 | writeb (EE_ENB, ee_addr); | |
1614 | eeprom_delay (); | |
1615 | } | |
1616 | ||
1617 | /* Terminate the EEPROM access. */ | |
1618 | writeb (~EE_CS, ee_addr); | |
1619 | eeprom_delay (); | |
1620 | ||
1621 | return retval; | |
1622 | } | |
1623 | ||
1624 | /* Put the board into D3cold state and wait for WakeUp signal */ | |
1625 | static void cp_set_d3_state (struct cp_private *cp) | |
1626 | { | |
1627 | pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */ | |
1628 | pci_set_power_state (cp->pdev, PCI_D3hot); | |
1629 | } | |
1630 | ||
1631 | static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |
1632 | { | |
1633 | struct net_device *dev; | |
1634 | struct cp_private *cp; | |
1635 | int rc; | |
1636 | void __iomem *regs; | |
1637 | long pciaddr; | |
1638 | unsigned int addr_len, i, pci_using_dac; | |
1639 | u8 pci_rev; | |
1640 | ||
1641 | #ifndef MODULE | |
1642 | static int version_printed; | |
1643 | if (version_printed++ == 0) | |
1644 | printk("%s", version); | |
1645 | #endif | |
1646 | ||
1647 | pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev); | |
1648 | ||
1649 | if (pdev->vendor == PCI_VENDOR_ID_REALTEK && | |
1650 | pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) { | |
1651 | printk(KERN_ERR PFX "pci dev %s (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n", | |
1652 | pci_name(pdev), pdev->vendor, pdev->device, pci_rev); | |
1653 | printk(KERN_ERR PFX "Try the \"8139too\" driver instead.\n"); | |
1654 | return -ENODEV; | |
1655 | } | |
1656 | ||
1657 | dev = alloc_etherdev(sizeof(struct cp_private)); | |
1658 | if (!dev) | |
1659 | return -ENOMEM; | |
1660 | SET_MODULE_OWNER(dev); | |
1661 | SET_NETDEV_DEV(dev, &pdev->dev); | |
1662 | ||
1663 | cp = netdev_priv(dev); | |
1664 | cp->pdev = pdev; | |
1665 | cp->dev = dev; | |
1666 | cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug); | |
1667 | spin_lock_init (&cp->lock); | |
1668 | cp->mii_if.dev = dev; | |
1669 | cp->mii_if.mdio_read = mdio_read; | |
1670 | cp->mii_if.mdio_write = mdio_write; | |
1671 | cp->mii_if.phy_id = CP_INTERNAL_PHY; | |
1672 | cp->mii_if.phy_id_mask = 0x1f; | |
1673 | cp->mii_if.reg_num_mask = 0x1f; | |
1674 | cp_set_rxbufsize(cp); | |
1675 | ||
1676 | rc = pci_enable_device(pdev); | |
1677 | if (rc) | |
1678 | goto err_out_free; | |
1679 | ||
1680 | rc = pci_set_mwi(pdev); | |
1681 | if (rc) | |
1682 | goto err_out_disable; | |
1683 | ||
1684 | rc = pci_request_regions(pdev, DRV_NAME); | |
1685 | if (rc) | |
1686 | goto err_out_mwi; | |
1687 | ||
1688 | pciaddr = pci_resource_start(pdev, 1); | |
1689 | if (!pciaddr) { | |
1690 | rc = -EIO; | |
1691 | printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n", | |
1692 | pci_name(pdev)); | |
1693 | goto err_out_res; | |
1694 | } | |
1695 | if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) { | |
1696 | rc = -EIO; | |
1697 | printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n", | |
1698 | pci_resource_len(pdev, 1), pci_name(pdev)); | |
1699 | goto err_out_res; | |
1700 | } | |
1701 | ||
1702 | /* Configure DMA attributes. */ | |
1703 | if ((sizeof(dma_addr_t) > 4) && | |
1704 | !pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL) && | |
1705 | !pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) { | |
1706 | pci_using_dac = 1; | |
1707 | } else { | |
1708 | pci_using_dac = 0; | |
1709 | ||
1710 | rc = pci_set_dma_mask(pdev, 0xffffffffULL); | |
1711 | if (rc) { | |
1712 | printk(KERN_ERR PFX "No usable DMA configuration, " | |
1713 | "aborting.\n"); | |
1714 | goto err_out_res; | |
1715 | } | |
1716 | rc = pci_set_consistent_dma_mask(pdev, 0xffffffffULL); | |
1717 | if (rc) { | |
1718 | printk(KERN_ERR PFX "No usable consistent DMA configuration, " | |
1719 | "aborting.\n"); | |
1720 | goto err_out_res; | |
1721 | } | |
1722 | } | |
1723 | ||
1724 | cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | | |
1725 | PCIMulRW | RxChkSum | CpRxOn | CpTxOn; | |
1726 | ||
1727 | regs = ioremap(pciaddr, CP_REGS_SIZE); | |
1728 | if (!regs) { | |
1729 | rc = -EIO; | |
1730 | printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n", | |
1731 | pci_resource_len(pdev, 1), pciaddr, pci_name(pdev)); | |
1732 | goto err_out_res; | |
1733 | } | |
1734 | dev->base_addr = (unsigned long) regs; | |
1735 | cp->regs = regs; | |
1736 | ||
1737 | cp_stop_hw(cp); | |
1738 | ||
1739 | /* read MAC address from EEPROM */ | |
1740 | addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6; | |
1741 | for (i = 0; i < 3; i++) | |
1742 | ((u16 *) (dev->dev_addr))[i] = | |
1743 | le16_to_cpu (read_eeprom (regs, i + 7, addr_len)); | |
1744 | ||
1745 | dev->open = cp_open; | |
1746 | dev->stop = cp_close; | |
1747 | dev->set_multicast_list = cp_set_rx_mode; | |
1748 | dev->hard_start_xmit = cp_start_xmit; | |
1749 | dev->get_stats = cp_get_stats; | |
1750 | dev->do_ioctl = cp_ioctl; | |
1751 | dev->poll = cp_rx_poll; | |
1752 | dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */ | |
1753 | #ifdef BROKEN | |
1754 | dev->change_mtu = cp_change_mtu; | |
1755 | #endif | |
1756 | dev->ethtool_ops = &cp_ethtool_ops; | |
1757 | #if 0 | |
1758 | dev->tx_timeout = cp_tx_timeout; | |
1759 | dev->watchdog_timeo = TX_TIMEOUT; | |
1760 | #endif | |
1761 | ||
1762 | #if CP_VLAN_TAG_USED | |
1763 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | |
1764 | dev->vlan_rx_register = cp_vlan_rx_register; | |
1765 | dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid; | |
1766 | #endif | |
1767 | ||
1768 | if (pci_using_dac) | |
1769 | dev->features |= NETIF_F_HIGHDMA; | |
1770 | ||
1771 | dev->irq = pdev->irq; | |
1772 | ||
1773 | rc = register_netdev(dev); | |
1774 | if (rc) | |
1775 | goto err_out_iomap; | |
1776 | ||
1777 | printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, " | |
1778 | "%02x:%02x:%02x:%02x:%02x:%02x, " | |
1779 | "IRQ %d\n", | |
1780 | dev->name, | |
1781 | dev->base_addr, | |
1782 | dev->dev_addr[0], dev->dev_addr[1], | |
1783 | dev->dev_addr[2], dev->dev_addr[3], | |
1784 | dev->dev_addr[4], dev->dev_addr[5], | |
1785 | dev->irq); | |
1786 | ||
1787 | pci_set_drvdata(pdev, dev); | |
1788 | ||
1789 | /* enable busmastering and memory-write-invalidate */ | |
1790 | pci_set_master(pdev); | |
1791 | ||
1792 | if (cp->wol_enabled) cp_set_d3_state (cp); | |
1793 | ||
1794 | return 0; | |
1795 | ||
1796 | err_out_iomap: | |
1797 | iounmap(regs); | |
1798 | err_out_res: | |
1799 | pci_release_regions(pdev); | |
1800 | err_out_mwi: | |
1801 | pci_clear_mwi(pdev); | |
1802 | err_out_disable: | |
1803 | pci_disable_device(pdev); | |
1804 | err_out_free: | |
1805 | free_netdev(dev); | |
1806 | return rc; | |
1807 | } | |
1808 | ||
1809 | static void cp_remove_one (struct pci_dev *pdev) | |
1810 | { | |
1811 | struct net_device *dev = pci_get_drvdata(pdev); | |
1812 | struct cp_private *cp = netdev_priv(dev); | |
1813 | ||
1814 | if (!dev) | |
1815 | BUG(); | |
1816 | unregister_netdev(dev); | |
1817 | iounmap(cp->regs); | |
1818 | if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0); | |
1819 | pci_release_regions(pdev); | |
1820 | pci_clear_mwi(pdev); | |
1821 | pci_disable_device(pdev); | |
1822 | pci_set_drvdata(pdev, NULL); | |
1823 | free_netdev(dev); | |
1824 | } | |
1825 | ||
1826 | #ifdef CONFIG_PM | |
1827 | static int cp_suspend (struct pci_dev *pdev, u32 state) | |
1828 | { | |
1829 | struct net_device *dev; | |
1830 | struct cp_private *cp; | |
1831 | unsigned long flags; | |
1832 | ||
1833 | dev = pci_get_drvdata (pdev); | |
1834 | cp = netdev_priv(dev); | |
1835 | ||
1836 | if (!dev || !netif_running (dev)) return 0; | |
1837 | ||
1838 | netif_device_detach (dev); | |
1839 | netif_stop_queue (dev); | |
1840 | ||
1841 | spin_lock_irqsave (&cp->lock, flags); | |
1842 | ||
1843 | /* Disable Rx and Tx */ | |
1844 | cpw16 (IntrMask, 0); | |
1845 | cpw8 (Cmd, cpr8 (Cmd) & (~RxOn | ~TxOn)); | |
1846 | ||
1847 | spin_unlock_irqrestore (&cp->lock, flags); | |
1848 | ||
1849 | if (cp->pdev && cp->wol_enabled) { | |
1850 | pci_save_state (cp->pdev); | |
1851 | cp_set_d3_state (cp); | |
1852 | } | |
1853 | ||
1854 | return 0; | |
1855 | } | |
1856 | ||
1857 | static int cp_resume (struct pci_dev *pdev) | |
1858 | { | |
1859 | struct net_device *dev; | |
1860 | struct cp_private *cp; | |
1861 | ||
1862 | dev = pci_get_drvdata (pdev); | |
1863 | cp = netdev_priv(dev); | |
1864 | ||
1865 | netif_device_attach (dev); | |
1866 | ||
1867 | if (cp->pdev && cp->wol_enabled) { | |
1868 | pci_set_power_state (cp->pdev, PCI_D0); | |
1869 | pci_restore_state (cp->pdev); | |
1870 | } | |
1871 | ||
1872 | cp_init_hw (cp); | |
1873 | netif_start_queue (dev); | |
1874 | ||
1875 | return 0; | |
1876 | } | |
1877 | #endif /* CONFIG_PM */ | |
1878 | ||
1879 | static struct pci_driver cp_driver = { | |
1880 | .name = DRV_NAME, | |
1881 | .id_table = cp_pci_tbl, | |
1882 | .probe = cp_init_one, | |
1883 | .remove = cp_remove_one, | |
1884 | #ifdef CONFIG_PM | |
1885 | .resume = cp_resume, | |
1886 | .suspend = cp_suspend, | |
1887 | #endif | |
1888 | }; | |
1889 | ||
1890 | static int __init cp_init (void) | |
1891 | { | |
1892 | #ifdef MODULE | |
1893 | printk("%s", version); | |
1894 | #endif | |
1895 | return pci_module_init (&cp_driver); | |
1896 | } | |
1897 | ||
1898 | static void __exit cp_exit (void) | |
1899 | { | |
1900 | pci_unregister_driver (&cp_driver); | |
1901 | } | |
1902 | ||
1903 | module_init(cp_init); | |
1904 | module_exit(cp_exit); |