/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION	"0.61"
#define DRV_NAME	"forcedeth"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
/* Debug printout toggle: enable the first branch for verbose output. */
#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif
#define TX_WORK_PER_LOOP	64
#define RX_WORK_PER_LOOP	64
#define DEV_NEED_TIMERIRQ	0x00001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x00002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x00004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x00008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x00010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x00020	/* device supports vlan tagging and striping */
#define DEV_HAS_MSI		0x00040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x00080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x00100	/* device supports power savings */
#define DEV_HAS_STATISTICS_V1	0x00200	/* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x00400	/* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED	0x00800	/* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x01000	/* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR	0x02000	/* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX	0x04000	/* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1	0x08000	/* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2	0x10000	/* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3	0x20000	/* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT	0x40000	/* device needs to limit tx */
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX		0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX_OK		0x0010
#define NVREG_IRQ_TIMER		0x0020
#define NVREG_IRQ_LINK		0x0040
#define NVREG_IRQ_RX_FORCED	0x0080
#define NVREG_IRQ_TX_FORCED	0x0100
#define NVREG_IRQ_RECOVER_ERROR	0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU	0x0060
#define NVREG_IRQ_TX_ALL	(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL	(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER		(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970	/* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED	0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE	0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE	0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT	0
#define NVREG_RINGSZ_RXSHIFT	16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE	0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};
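/*
 * Usage note: the enum values above are byte offsets into the memory-mapped
 * register window, and the defines grouped under each offset are bit values
 * for that register, so accesses in this driver take the form
 *	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
 *	reg = readl(base + NvRegTransmitterControl);
 */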
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};
#define FLAG_MASK_V1	0xffff0000
#define FLAG_MASK_V2	0xffffc000
#define LEN_MASK_V1	(0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2	(0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)
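/*
 * Note: the intervals above are in jiffies; e.g. OOM_REFILL is HZ/20
 * (~50 ms) plus one jiffy so the result can never evaluate to zero, and
 * POLL_WAIT is HZ/100 (~10 ms) plus one jiffy.
 */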
/*
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

/* PHY defines */
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHY_OUI_VITESSE	0x01c1
#define PHY_OUI_REALTEK	0x0732
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_MARVELL_E3016		0x220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_HALF	0x100
#define NV_PAUSEFRAME_RX_CAPABLE	0x0001
#define NV_PAUSEFRAME_TX_CAPABLE	0x0002
#define NV_PAUSEFRAME_RX_ENABLE		0x0004
#define NV_PAUSEFRAME_TX_ENABLE		0x0008
#define NV_PAUSEFRAME_RX_REQ		0x0010
#define NV_PAUSEFRAME_TX_REQ		0x0020
#define NV_PAUSEFRAME_AUTONEG		0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

#define NV_RESTART_TX		0x1
#define NV_RESTART_RX		0x2

#define NV_TX_LIMIT_COUNT	16
/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" }
};
struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;
};

#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
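/*
 * NV_DEV_STATISTICS_V1_COUNT subtracts 6 because the last six fields of
 * struct nv_ethtool_stats (tx_deferral through rx_drop_frame) only exist
 * on hardware with version 2 statistics support.
 */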
/* diagnostics */
#define NV_TEST_COUNT_BASE	3
#define NV_TEST_COUNT_EXTENDED	4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};
struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};
struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};
/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	u16 gigabit;
	int intr_test;
	int recover_error;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;

	u8 __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;
/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
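/*
 * Worked example: a 1 ms period is 1000 us, and (1000 * 100) / 2^10 = ~97,
 * which matches the NVREG_POLL_DEFAULT=97 value quoted earlier for a 1 ms
 * timer interval.
 */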
/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;
/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_DISABLED;
/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
#define NV_SETUP_RX_RING	0x01
#define NV_SETUP_TX_RING	0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
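/*
 * The double shift in dma_high() avoids a shift by the full width of the
 * type: "addr >> 32" would be undefined when dma_addr_t is 32 bits wide,
 * while ">>31>>1" is always defined and yields 0 on 32-bit configurations.
 */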
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
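/*
 * Layout note: rx and tx descriptors live in a single DMA allocation, with
 * the tx ring starting rx_ring_size descriptors past ring_addr; that is why
 * the tx base address above is ring_addr + rx_ring_size * descriptor size.
 */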
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	/* reset the phy
	 * (certain phys need bmcr to be setup with reset)
	 */
	if (phy_reset(dev, mii_control)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		/* reset could have cleared these out, set them back */
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}
}
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
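/*
 * Note on the loop bound: less_rx is the slot just before get_rx, so the
 * fill loop stops one descriptor short of the one the nic will consume
 * next, keeping put_rx from catching up with get_rx on a full ring.
 */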
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex *less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
1566 #ifdef CONFIG_FORCEDETH_NAPI
1567 static void nv_do_rx_refill(unsigned long data
)
1569 struct net_device
*dev
= (struct net_device
*) data
;
1570 struct fe_priv
*np
= netdev_priv(dev
);
1572 /* Just reschedule NAPI rx processing */
1573 netif_rx_schedule(dev
, &np
->napi
);
1576 static void nv_do_rx_refill(unsigned long data
)
1578 struct net_device
*dev
= (struct net_device
*) data
;
1579 struct fe_priv
*np
= netdev_priv(dev
);
1582 if (!using_multi_irqs(dev
)) {
1583 if (np
->msi_flags
& NV_MSI_X_ENABLED
)
1584 disable_irq(np
->msi_x_entry
[NV_MSI_X_VECTOR_ALL
].vector
);
1586 disable_irq(np
->pci_dev
->irq
);
1588 disable_irq(np
->msi_x_entry
[NV_MSI_X_VECTOR_RX
].vector
);
1590 if (!nv_optimized(np
))
1591 retcode
= nv_alloc_rx(dev
);
1593 retcode
= nv_alloc_rx_optimized(dev
);
1595 spin_lock_irq(&np
->lock
);
1596 if (!np
->in_shutdown
)
1597 mod_timer(&np
->oom_kick
, jiffies
+ OOM_REFILL
);
1598 spin_unlock_irq(&np
->lock
);
1600 if (!using_multi_irqs(dev
)) {
1601 if (np
->msi_flags
& NV_MSI_X_ENABLED
)
1602 enable_irq(np
->msi_x_entry
[NV_MSI_X_VECTOR_ALL
].vector
);
1604 enable_irq(np
->pci_dev
->irq
);
1606 enable_irq(np
->msi_x_entry
[NV_MSI_X_VECTOR_RX
].vector
);
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}
static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}
static int nv_release_txskb(struct net_device *dev, struct nv_skb_map *tx_skb)
{
	struct fe_priv *np = netdev_priv(dev);

	if (tx_skb->dma) {
		pci_unmap_page(np->pci_dev, tx_skb->dma,
			       tx_skb->dma_len,
			       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	} else {
		return 0;
	}
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(dev, &np->tx_skb[i]))
			dev->stats.tx_dropped++;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
}
static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}

static void nv_drain_rxtx(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}
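/*
 * Worked example: with tx_ring_size = 8 and put_tx_ctx two slots ahead of
 * get_tx_ctx, this yields 8 - ((8 + 2) % 8) = 6 empty slots; adding
 * tx_ring_size before the modulus keeps the result correct when the
 * pointer difference is negative after a wrap.
 */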
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc *put_tx;
	struct ring_desc *start_tx;
	struct ring_desc *prev_tx;
	struct nv_skb_map *prev_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irqsave(&np->lock, flags);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irqsave(&np->lock, flags);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
1893 static int nv_start_xmit_optimized(struct sk_buff
*skb
, struct net_device
*dev
)
1895 struct fe_priv
*np
= netdev_priv(dev
);
1898 unsigned int fragments
= skb_shinfo(skb
)->nr_frags
;
1902 u32 size
= skb
->len
-skb
->data_len
;
1903 u32 entries
= (size
>> NV_TX2_TSO_MAX_SHIFT
) + ((size
& (NV_TX2_TSO_MAX_SIZE
-1)) ? 1 : 0);
1905 struct ring_desc_ex
* put_tx
;
1906 struct ring_desc_ex
* start_tx
;
1907 struct ring_desc_ex
* prev_tx
;
1908 struct nv_skb_map
* prev_tx_ctx
;
1909 struct nv_skb_map
* start_tx_ctx
;
1910 unsigned long flags
;
	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irqsave(&np->lock, flags);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.ex;
	start_tx_ctx = np->put_tx_ctx;
	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);
	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}
	/* set last fragment flag  */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (likely(!np->vlangrp)) {
		start_tx->txvlan = 0;
	} else {
		if (vlan_tx_tag_present(skb))
			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
		else
			start_tx->txvlan = 0;
	}

	spin_lock_irqsave(&np->lock, flags);
	/* Limit the number of outstanding tx. Setup all fragments, but
	 * do not set the VALID bit on the first descriptor. Save a pointer
	 * to that descriptor and also for next skb_map element.
	 */

	if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
		if (!np->tx_change_owner)
			np->tx_change_owner = start_tx_ctx;

		/* remove VALID bit */
		tx_flags &= ~NV_TX2_VALID;
		start_tx_ctx->first_tx_desc = start_tx;
		start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
		np->tx_end_flip = np->put_tx_ctx;
	} else {
		np->tx_pkts_in_progress++;
	}
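	/* Once NV_TX_LIMIT_COUNT packets are in flight, further packets are
	 * queued with the VALID bit stripped from their first descriptor and
	 * chained through next_tx_ctx. nv_tx_flip_ownership() then re-adds
	 * VALID one packet per completion, bounding the number of descriptors
	 * the hardware owns at any time.
	 */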
	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
static inline void nv_tx_flip_ownership(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	np->tx_pkts_in_progress--;
	if (np->tx_change_owner) {
		np->tx_change_owner->first_tx_desc->flaglen |=
			cpu_to_le32(NV_TX2_VALID);
		np->tx_pkts_in_progress++;

		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
		if (np->tx_change_owner == np->tx_end_flip)
			np->tx_change_owner = NULL;

		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	}
}
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc* orig_get_tx = np->get_tx.orig;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
			dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if (flags & NV_TX_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if (flags & NV_TX2_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}
static void nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
	       (limit-- > 0)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
			dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				dev->stats.tx_packets++;
			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;

			if (np->tx_limit)
				nv_tx_flip_ownership(dev);
		}

		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
		       dev->name, (unsigned long)np->ring_addr);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i=0;i<=np->register_size;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i=0;i<np->tx_ring_size;i+= 4) {
			if (!nv_optimized(np)) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	if (!nv_optimized(np))
		nv_tx_done(dev);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* 3) if there are dead entries: clear everything */
	if (np->get_tx_ctx != np->put_tx_ctx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		nv_init_tx(dev);
		setup_hw_rings(dev, NV_SETUP_TX_RING);
	}

	netif_wake_queue(dev);

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
		dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
				dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
				dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
			dev->name, datalen);
		return datalen;
	}
}
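/* Examples: a frame with a 40 byte payload arrives padded to the 60 byte
 * minimum, so datalen = 60 and protolen = 40 + ETH_HLEN = 54; the short
 * packet branch accepts the padded datalen. A 1000 byte frame whose header
 * claims 900 bytes of payload yields protolen = 914 <= datalen, so the
 * packet is trimmed to 914 bytes.
 */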
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while((np->get_rx.orig != np->put_rx.orig) &&
	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
		(rx_work < limit)) {
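		/* One completed descriptor per iteration: stop at the first
		 * descriptor the NIC still owns (NV_RX_AVAIL set) or after
		 * 'limit' packets, so a single call cannot monopolize the CPU.
		 */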
		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				np->get_rx_ctx->dma_len,
				PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if (flags & NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if (flags & NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							dev->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if (flags & NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if (flags & NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
			dev->name, len, skb->protocol);
#ifdef CONFIG_FORCEDETH_NAPI
		netif_receive_skb(skb);
#else
		netif_rx(skb);
#endif
		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while((np->get_rx.ex != np->put_rx.ex) &&
	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	      (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				np->get_rx_ctx->dma_len,
				PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if (flags & NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if (flags & NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
				dev->name, len, skb->protocol);

			if (likely(!np->vlangrp)) {
#ifdef CONFIG_FORCEDETH_NAPI
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif
			} else {
				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
#ifdef CONFIG_FORCEDETH_NAPI
					vlan_hwaccel_receive_skb(skb, np->vlangrp,
								 vlanflags & NV_RX3_VLAN_TAG_MASK);
#else
					vlan_hwaccel_rx(skb, np->vlangrp,
							vlanflags & NV_RX3_VLAN_TAG_MASK);
#endif
				} else {
#ifdef CONFIG_FORCEDETH_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
				}
			}

			dev->last_rx = jiffies;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
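/* NV_RX_HEADERS pads the rx buffer beyond the MTU; the MTU counts only the
 * payload, so the extra room is presumably for the link-level header and
 * framing that still have to fit into the buffer.
 */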
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rxtx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}
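/* Example: a MAC address of 00:11:22:33:44:55 is written as
 * NvRegMacAddrA = 0x33221100 and NvRegMacAddrB = 0x00005544, i.e. the
 * address bytes packed little-endian into the two registers.
 */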
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr*)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
					b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		} else {
			mask[0] = NVREG_MCASTMASKA_NONE;
			mask[1] = NVREG_MCASTMASKB_NONE;
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
			writel(pause_enable, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}
/*
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;
	u32 phy_exp;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
			dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
			dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
		dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
			(status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
		dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	/* The transmitter and receiver must be restarted for safe update */
	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
		txrxFlags |= NV_RESTART_TX;
		nv_stop_tx(dev);
	}
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		txrxFlags |= NV_RESTART_RX;
		nv_stop_rx(dev);
	}

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegRandomSeed);
		phyreg &= ~(0x3FF00);
		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
			phyreg |= NVREG_RNDSEED_FORCE3;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
			phyreg |= NVREG_RNDSEED_FORCE2;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_RNDSEED_FORCE;
		writel(phyreg, base + NvRegRandomSeed);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		} else {
			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
				else
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
			} else {
				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
			}
		}
	} else {
		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
		else
			txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
		base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	if (txrxFlags & NV_RESTART_TX)
		nv_start_tx(dev);
	if (txrxFlags & NV_RESTART_RX)
		nv_start_rx(dev);

	return retval;
}
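/* The switch above implements the usual 802.3 pause resolution: symmetric
 * pause on both sides enables rx pause (plus tx pause if requested), while
 * the asymmetric advertisement combinations enable pause in one direction
 * only.
 */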
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_stop_rx(dev);
		}
	}
}
static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	for (i=0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done(dev);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev, &np->napi);

			/* Disable further receive irq's */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
				dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
				dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}
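/* The max_interrupt_work escape above masks the interrupt on the nic and
 * arms np->nic_poll, so the remaining work is finished from timer context;
 * this protects against interrupt storms from a wedged chip.
 */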
/*
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);

	for (i=0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev, &np->napi);

			/* Disable further receive irq's */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
				dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
				dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
				dev->name, events);
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
	struct net_device *dev = np->dev;
	u8 __iomem *base = get_hwbase(dev);
	unsigned long flags;
	int pkts, retcode;

	if (!nv_optimized(np)) {
		pkts = nv_rx_process(dev, budget);
		retcode = nv_alloc_rx(dev);
	} else {
		pkts = nv_rx_process_optimized(dev, budget);
		retcode = nv_alloc_rx_optimized(dev);
	}

	if (retcode) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	if (pkts < budget) {
		/* re-enable receive interrupts */
		spin_lock_irqsave(&np->lock, flags);

		__netif_rx_complete(dev, napi);

		np->irqmask |= NVREG_IRQ_RX_ALL;
		if (np->msi_flags & NV_MSI_X_ENABLED)
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
		else
			writel(np->irqmask, base + NvRegIrqMask);

		spin_unlock_irqrestore(&np->lock, flags);
	}
	return pkts;
}
#endif
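/* Standard NAPI handshake: the ISR masked NVREG_IRQ_RX_ALL before
 * scheduling the poll, and the mask is lifted here only once fewer than
 * 'budget' packets were processed, i.e. the rx ring has been drained.
 */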
#ifdef CONFIG_FORCEDETH_NAPI
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
	writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);

	if (events) {
		netif_rx_schedule(dev, &np->napi);
		/* disable receive interrupts on the nic */
		writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
		pci_push(base);
	}
	return IRQ_HANDLED;
}
#else
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock_irqsave(&np->lock, flags);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock_irqrestore(&np->lock, flags);
			}
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#endif
static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		/* check tx in case we reached max loop limit in tx isr */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irqsave(&np->lock, flags);
			nv_link_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irq(&np->lock);
			break;
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
				dev->name, events);
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
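/* Example: an irqmask with bits 4 and 5 set, mapped to vector 1, ORs 0x1
 * into nibbles 4 and 5 of NvRegMSIXMap0 (msixmap = 0x00110000); each
 * interrupt cause thus gets a 4-bit field selecting its MSI-X vector.
 */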
static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;
	irqreturn_t (*handler)(int foo, void *data);

	if (intr_test) {
		handler = nv_nic_irq_test;
	} else {
		if (nv_optimized(np))
			handler = nv_nic_irq_optimized;
		else
			handler = nv_nic_irq;
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			dev->irq = np->pci_dev->irq;
			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				dev->irq = np->pci_dev->irq;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
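/* The fallback order above is: MSI-X (separate rx/tx/other vectors in
 * throughput mode, otherwise one shared vector), then MSI, and finally
 * the legacy INTx line if neither could be enabled.
 */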
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}
static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable irq(s) and then
	 * reenable interrupts on the nic, we have to do this before calling
	 * nv_nic_irq because that may decide to do otherwise
	 */

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(np->pci_dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	np->nic_poll_irq = 0;

	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */

	if (np->recover_error) {
		np->recover_error = 0;
		printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);

			/* restart rx engine */
			nv_start_rxtx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
		}
	}

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		if (nv_optimized(np))
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(np->pci_dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}
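/* nv_do_nic_poll runs with the relevant irq line(s) disabled, so it can
 * call the interrupt handlers directly without racing them. It serves
 * both the nic_poll/recovery timer and the netpoll path below.
 */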
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif
static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	nv_get_hw_stats(dev);

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll,
			round_jiffies(jiffies + STATS_INTERVAL));
}
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}
static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;

	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - no one should need that. */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |=  ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |=  ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}
#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}
static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* note: '<' (not '<=') so we stay within the register_size-byte buffer */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}
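
/*
 * Illustrative userspace sketch (not part of the driver): the dump produced
 * above is what "ethtool -d ethX" retrieves through the ETHTOOL_GREGS ioctl.
 * The interface name and the missing error handling are placeholders.
 *
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *	struct ethtool_regs *regs = calloc(1, sizeof(*regs) + regdump_len);
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	regs->cmd = ETHTOOL_GREGS;
 *	regs->len = regdump_len;	// from ETHTOOL_GDRVINFO's regdump_len
 *	ifr.ifr_data = (void *)regs;
 *	ioctl(fd, SIOCETHTOOL, &ifr);	// regs->data[] now holds the dump
 */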
static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rxtx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}
static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}
static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}
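
/*
 * Usage note: these limits surface as "ethtool -g ethX"; a request such as
 * "ethtool -G ethX rx 512 tx 512" lands in nv_set_ringparam() below, which
 * validates the sizes against the per-descriptor-version maxima before
 * swapping in freshly allocated rings.
 */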
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (!nv_optimized(np)) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
					&ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
					&ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (!nv_optimized(np)) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		if (rx_skbuff)
			kfree(rx_skbuff);
		if (tx_skbuff)
			kfree(tx_skbuff);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rxtx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map*)rx_skbuff;
	np->tx_skb = (struct nv_skb_map*)tx_skbuff;
	np->ring_addr = ring_addr;

	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}
static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
		       dev->name);
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return (np->rx_csum) != 0;
}
static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (data) {
			np->rx_csum = 1;
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else {
			np->rx_csum = 0;
			/* vlan is dependent on rx checksum offload */
			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		}
		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}
static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_hw_csum(dev, data);
	else
		return -EOPNOTSUPP;
}
static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}
static int nv_get_sset_count(struct net_device *dev, int sset)
{
	struct fe_priv *np = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_TEST:
		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
			return NV_TEST_COUNT_EXTENDED;
		else
			return NV_TEST_COUNT_BASE;
	case ETH_SS_STATS:
		if (np->driver_data & DEV_HAS_STATISTICS_V1)
			return NV_DEV_STATISTICS_V1_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
			return NV_DEV_STATISTICS_V2_COUNT;
		else
			return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
}
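
/*
 * Usage note: this is the backend of "ethtool -S ethX".  The counters are
 * refreshed from the hardware via nv_do_stats_poll() immediately before the
 * copy, so the reported values are at most one poll old.
 */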
static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}
static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

	} while (nv_registers_test[++i].reg != 0);

	return 1;
}
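
/*
 * The register test above uses a classic toggle-and-verify pattern: each
 * entry of nv_registers_test[] names a register and a mask of bits known to
 * be read/write.  XOR-ing with the mask flips exactly those bits, the
 * write/read-back pair checks that they stuck, and a second XOR restores
 * the original value.  A mismatch indicates dead or stuck register bits.
 */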
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
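
/*
 * How the test above works: drop to a single interrupt vector, arm only the
 * timer interrupt, and sleep; the test ISR sets np->intr_test when it runs.
 * Return values: 1 = pass, 2 = no interrupt was seen, 0 = the test irq
 * could not even be set up (the caller must bail out).
 */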
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rxtx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
			 " of %s\n", dev->name);
		ret = 0;
		goto out;
	}
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_FROMDEVICE);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	if (!nv_optimized(np)) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (!nv_optimized(np)) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);

	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR) {
			ret = 0;
		}
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}

	pci_unmap_page(np->pci_dev, test_dma_addr,
		       (skb_end_pointer(tx_skb) - tx_skb->data),
		       PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
 out:
	/* stop engines */
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rxtx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}
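
/*
 * Note on the descriptor setup in the loopback test: the hardware flaglen
 * field encodes the buffer length minus one, hence the (pkt_len-1) above;
 * the regular transmit path uses the same length-minus-one convention.
 */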
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;
	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
			napi_disable(&np->napi);
#endif
			netif_tx_lock_bh(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			spin_unlock_irq(&np->lock);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rxtx(dev);
			netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
			napi_enable(&np->napi);
#endif
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}
static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
		break;
	}
}
static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.get_sset_count = nv_get_sset_count,
	.self_test = nv_self_test,
};
static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}
/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
			return 1;
		else
			udelay(50);
	}

	return 0;
}
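
/*
 * Handshake summary for the code above: poll (up to ~5 seconds) until the
 * management unit's semaphore field reads FREE, then try twice to set the
 * host-acquire bit and read back both fields; only the combination of
 * "host bit stuck AND mgmt field still FREE" counts as ownership.  On
 * failure the phy is left to the management unit.
 */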
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
	get_random_bytes(&i, sizeof(i));
	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	}
	else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
			base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rxtx(dev);
	netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
	napi_enable(&np->napi);
#endif

	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
		mod_timer(&np->stats_poll,
			round_jiffies(jiffies + STATS_INTERVAL));

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	nv_drain_rxtx(dev);
	return ret;
}
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
#ifdef CONFIG_FORCEDETH_NAPI
	napi_disable(&np->napi);
#endif
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	nv_drain_rxtx(dev);

	if (np->wolenabled) {
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	}

	/* FIXME: power down nic */

	return 0;
}
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;
	DECLARE_MAC_BUF(mac);
	static int printed_version;

	if (!printed_version++)
		printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
		       " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->dev = dev;
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err)
		goto out_free;

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
				pci_resource_len(pci_dev, i),
				pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
				pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "Couldn't find register window\n");
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK))
				dev_printk(KERN_INFO, &pci_dev->dev,
					"64-bit DMA failed, using 32-bit addressing\n");
			else
				dev->features |= NETIF_F_HIGHDMA;
			if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				dev_printk(KERN_INFO, &pci_dev->dev,
					"64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
		dev->vlan_rx_register = nv_vlan_rx_register;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;

	dev->open = nv_open;
	dev->stop = nv_close;

	if (!nv_optimized(np))
		dev->hard_start_xmit = nv_start_xmit;
	else
		dev->hard_start_xmit = nv_start_xmit_optimized;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
#ifdef CONFIG_FORCEDETH_NAPI
	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
		/*
		 * Set orig mac address back to the reversed version.
		 * This flag will be cleared during low power transition.
		 * Therefore, we should always put back the reversed address.
		 */
		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		dev_printk(KERN_ERR, &pci_dev->dev,
			"Invalid Mac address detected: %s\n",
			print_mac(mac, dev->dev_addr));
		dev_printk(KERN_ERR, &pci_dev->dev,
			"Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %s\n",
		pci_name(pci_dev), print_mac(mac, dev->dev_addr));

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* Limit the number of tx's outstanding for hw bug */
	if (id->driver_data & DEV_NEED_TX_LIMIT) {
		np->tx_limit = 1;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
		    pci_dev->revision >= 0xA2)
			np->tx_limit = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
			np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
			if (nv_mgmt_acquire_sema(dev)) {
				/* management unit setup the phy already? */
				if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
				    NVREG_XMITCTL_SYNC_PHY_INIT) {
					/* phy is inited by mgmt unit */
					phyinitialized = 1;
					dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
				} else {
					/* we need to init the phy */
				}
			}
		}
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			"open: Could not find a valid PHY.\n");
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "unable to register netdev: %d\n", err);
		goto out_error;
	}

	dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
		   "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		   dev->name,
		   np->phy_oui,
		   np->phyaddr,
		   dev->dev_addr[0],
		   dev->dev_addr[1],
		   dev->dev_addr[2],
		   dev->dev_addr[3],
		   dev->dev_addr[4],
		   dev->dev_addr[5]);

	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		   dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
			"csum " : "",
		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
			"vlan " : "",
		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
		   np->need_linktimer ? "lnktim " : "",
		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		   np->desc_ver);

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	nv_close(dev);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
	return 0;
}
static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	u8 __iomem *base = get_hwbase(dev);
	int rc = 0;
	u32 txreg;

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	/* restore mac address reverse flag */
	txreg = readl(base + NvRegTransmitPoll);
	txreg |= NVREG_TRANSMITPOLL_MAC_ADDR_REV;
	writel(txreg, base + NvRegTransmitPoll);

	rc = nv_open(dev);
out:
	return rc;
}
#else
#define nv_suspend NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
	},
	{0,},
};
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
	.suspend = nv_suspend,
	.resume	= nv_resume,
};

static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, computed as (time_in_micro_secs * 100) / (2^10). Min is 0 and max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);
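
/*
 * Loading example (hypothetical values): the parameters above are plain
 * module options, e.g.
 *
 *	modprobe forcedeth optimization_mode=1 poll_interval=970 msi=1
 *
 * which selects the timer-driven interrupt scheme with roughly 100 timer
 * interrupts per second (970 * 2^10 / 100 is about 10000 microseconds) and
 * MSI enabled where the hardware supports it.
 */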