tg3: Add function status reporting
drivers/net/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag) \
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag) \
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag) \
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
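
/* For illustration: tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the flag bitmap, so flag queries need no extra locking.
 */
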
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		119
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

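/* Worked example (for illustration only): with TG3_TX_RING_SIZE == 512,
 * a power of two, (N + 1) % 512 and (N + 1) & 511 produce the same
 * result, which is why the NEXT_TX() macro below can advance a ring
 * index with a cheap AND instead of a hardware divide.
 */
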
#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

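/* Illustrative sketch (not the actual rx path, which appears later in
 * this file): a receive handler compares the packet length against
 * TG3_RX_COPY_THRESH(tp) and, for small frames, copies the data into a
 * freshly allocated skb rather than unmapping and handing up the
 * original DMA buffer.
 */
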
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
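
/* Posted PCI writes can sit in bridge buffers indefinitely; reading the
 * register back forces the write to complete. For example,
 * tg3_switch_clocks() below uses
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * to both flush the write and honor the 40 usec settling time the clock
 * switch requires.
 */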

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

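/* Note these macros assume a local variable named tp in the calling scope.
 * tw32() and tr32() go through per-device function pointers, so the
 * indirect or flush-on-write access methods can be substituted at probe
 * time, while tw32_f() always flushes by reading the register back.
 */
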
556static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
557{
6892914f
MC
558 unsigned long flags;
559
6ff6f81d 560 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
b5d3772c
MC
561 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
562 return;
563
6892914f 564 spin_lock_irqsave(&tp->indirect_lock, flags);
63c3a66f 565 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
bbadf503
MC
566 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
567 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 568
bbadf503
MC
569 /* Always leave this as zero. */
570 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
571 } else {
572 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
573 tw32_f(TG3PCI_MEM_WIN_DATA, val);
28fbef78 574
bbadf503
MC
575 /* Always leave this as zero. */
576 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
577 }
578 spin_unlock_irqrestore(&tp->indirect_lock, flags);
758a6139
DM
579}
580
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++) {
		if (i == TG3_APE_LOCK_GPIO)
			continue;
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
	}

	/* Clear the correct bit of the GPIO lock too. */
	if (!tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
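
/* Typical usage (a sketch, mirroring how this file brackets access to
 * resources shared with the APE management firmware):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;
 *	... touch the shared region ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * A failed lock means the APE did not grant it within 1 ms.
 */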

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

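/* MAC_MI_COM drives a standard MII management (MDIO) frame: the PHY and
 * register addresses are packed into their fields, the START and
 * READ/WRITE command bits kick off the serial cycle, and MI_COM_BUSY
 * clears when it finishes. E.g. tg3_readphy(tp, MII_BMSR, &reg) fetches
 * the attached PHY's basic-mode status register, as done later in
 * tg3_ump_link_report().
 */
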
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
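
/* These helpers implement the standard Clause-22 indirect access to
 * Clause-45 MMD registers: write the MMD device address (devad) to the
 * access-control register, write the target register address, switch the
 * control register to no-post-increment data mode, then move the data
 * itself through the address/data register. The MII_TG3_MMD_* offsets
 * correspond to the generic MII registers 13 and 14 that IEEE 802.3
 * reserves for this purpose.
 */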

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;
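
	/* Worked example: with the full TG3_FW_EVENT_TIMEOUT_USEC budget of
	 * 2500 usec remaining, delay_cnt becomes (2500 >> 3) + 1 = 313, and
	 * 313 polls spaced udelay(8) apart cover roughly those 2500 usec.
	 */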
	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
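
/* This follows the IEEE 802.3 pause-resolution rules. Writing each side's
 * advertisement as (PAUSE, ASYM_PAUSE) bits:
 *
 *	local		remote		result
 *	(1, x)		(1, x)		TX + RX pause
 *	(1, 1)		(0, 1)		RX pause only
 *	(0, 1)		(1, 1)		TX pause only
 *	otherwise			no pause
 */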

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

7f97a4bd
MC
1682static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1683{
1684 u32 phytest;
1685
1686 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1687 u32 phy;
1688
1689 tg3_writephy(tp, MII_TG3_FET_TEST,
1690 phytest | MII_TG3_FET_SHADOW_EN);
1691 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1692 if (enable)
1693 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1694 else
1695 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1696 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1697 }
1698 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1699 }
1700}
1701
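/* Auto power-down (APD) lets the PHY enter a low-power state while no
 * link partner is present.  The two MII_TG3_MISC_SHDW writes below
 * program the SCR5 and APD shadow registers; the wake-up timer is set
 * to 84 ms (MII_TG3_MISC_SHDW_APD_WKTM_84MS).
 */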
1702static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1703{
1704 u32 reg;
1705
1706 if (!tg3_flag(tp, 5705_PLUS) ||
1707 (tg3_flag(tp, 5717_PLUS) &&
1708 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1709 return;
1710
1711 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1712 tg3_phy_fet_toggle_apd(tp, enable);
1713 return;
1714 }
1715
1716 reg = MII_TG3_MISC_SHDW_WREN |
1717 MII_TG3_MISC_SHDW_SCR5_SEL |
1718 MII_TG3_MISC_SHDW_SCR5_LPED |
1719 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1720 MII_TG3_MISC_SHDW_SCR5_SDTL |
1721 MII_TG3_MISC_SHDW_SCR5_C125OE;
1722 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1723 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1724
1725 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1726
1727
1728 reg = MII_TG3_MISC_SHDW_WREN |
1729 MII_TG3_MISC_SHDW_APD_SEL |
1730 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1731 if (enable)
1732 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1733
1734 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1735}
1736
1737static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1738{
1739 u32 phy;
1740
1741 if (!tg3_flag(tp, 5705_PLUS) ||
1742 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1743 return;
1744
1745 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1746 u32 ephy;
1747
1748 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1749 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1750
1751 tg3_writephy(tp, MII_TG3_FET_TEST,
1752 ephy | MII_TG3_FET_SHADOW_EN);
1753 if (!tg3_readphy(tp, reg, &phy)) {
1754 if (enable)
1755 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1756 else
1757 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1758 tg3_writephy(tp, reg, phy);
1759 }
1760 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1761 }
1762 } else {
1763 int ret;
1764
1765 ret = tg3_phy_auxctl_read(tp,
1766 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1767 if (!ret) {
1768 if (enable)
1769 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1770 else
1771 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1772 tg3_phy_auxctl_write(tp,
1773 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1774 }
1775 }
1776}
1777
1778static void tg3_phy_set_wirespeed(struct tg3 *tp)
1779{
1780 int ret;
1781 u32 val;
1782
1783 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1784 return;
1785
1786 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1787 if (!ret)
1788 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1789 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1790}
1791
1792static void tg3_phy_apply_otp(struct tg3 *tp)
1793{
1794 u32 otp, phy;
1795
1796 if (!tp->phy_otp)
1797 return;
1798
1799 otp = tp->phy_otp;
1800
1801 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1802 return;
1803
1804 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1805 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1806 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1807
1808 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1809 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1810 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1811
1812 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1813 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1814 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1815
1816 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1817 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1818
1819 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1820 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1821
1822 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1823 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1824 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1825
1826 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1827}
1828
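/* 802.3az Energy Efficient Ethernet.  LPI (low-power idle) is only
 * armed when autoneg produced a full-duplex 100/1000 link and the
 * link partner resolution status shows EEE support; otherwise the
 * LPI enable bit in TG3_CPMU_EEE_MODE is cleared.
 */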
1829static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1830{
1831 u32 val;
1832
1833 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1834 return;
1835
1836 tp->setlpicnt = 0;
1837
1838 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1839 current_link_up == 1 &&
1840 tp->link_config.active_duplex == DUPLEX_FULL &&
1841 (tp->link_config.active_speed == SPEED_100 ||
1842 tp->link_config.active_speed == SPEED_1000)) {
1843 u32 eeectl;
1844
1845 if (tp->link_config.active_speed == SPEED_1000)
1846 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1847 else
1848 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1849
1850 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1851
1852 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1853 TG3_CL45_D7_EEERES_STAT, &val);
1854
1855 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1856 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1857 tp->setlpicnt = 2;
1858 }
1859
1860 if (!tp->setlpicnt) {
1861 val = tr32(TG3_CPMU_EEE_MODE);
1862 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1863 }
1864}
1865
1866static void tg3_phy_eee_enable(struct tg3 *tp)
1867{
1868 u32 val;
1869
1870 if (tp->link_config.active_speed == SPEED_1000 &&
1871 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1872 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1873 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1874 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1875 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1876 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1877 }
1878
1879 val = tr32(TG3_CPMU_EEE_MODE);
1880 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1881}
1882
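/* Poll MII_TG3_DSP_CONTROL until the PHY clears its busy bit
 * (0x1000).  The loop is bounded at 100 reads so a wedged PHY makes
 * this return -EBUSY instead of hanging.
 */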
1883static int tg3_wait_macro_done(struct tg3 *tp)
1884{
1885 int limit = 100;
1886
1887 while (limit--) {
1888 u32 tmp32;
1889
1890 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1891 if ((tmp32 & 0x1000) == 0)
1892 break;
1893 }
1894 }
1895 if (limit < 0)
1896 return -EBUSY;
1897
1898 return 0;
1899}
1900
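/* Write the four-channel DSP test patterns and read them back.  Only
 * the low 15 bits of the first word and the low 4 bits of the second
 * are significant, matching the 0x7fff/0x000f masks applied below.
 */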
1901static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1902{
1903 static const u32 test_pat[4][6] = {
1904 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1905 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1906 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1907 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1908 };
1909 int chan;
1910
1911 for (chan = 0; chan < 4; chan++) {
1912 int i;
1913
1914 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1915 (chan * 0x2000) | 0x0200);
1916 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1917
1918 for (i = 0; i < 6; i++)
1919 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1920 test_pat[chan][i]);
1921
1922 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1923 if (tg3_wait_macro_done(tp)) {
1924 *resetp = 1;
1925 return -EBUSY;
1926 }
1927
1928 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1929 (chan * 0x2000) | 0x0200);
1930 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1931 if (tg3_wait_macro_done(tp)) {
1932 *resetp = 1;
1933 return -EBUSY;
1934 }
1935
1936 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1937 if (tg3_wait_macro_done(tp)) {
1938 *resetp = 1;
1939 return -EBUSY;
1940 }
1941
1942 for (i = 0; i < 6; i += 2) {
1943 u32 low, high;
1944
1945 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1946 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1947 tg3_wait_macro_done(tp)) {
1948 *resetp = 1;
1949 return -EBUSY;
1950 }
1951 low &= 0x7fff;
1952 high &= 0x000f;
1953 if (low != test_pat[chan][i] ||
1954 high != test_pat[chan][i+1]) {
1955 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1956 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1957 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1958
1959 return -EBUSY;
1960 }
1961 }
1962 }
1963
1964 return 0;
1965}
1966
1967static int tg3_phy_reset_chanpat(struct tg3 *tp)
1968{
1969 int chan;
1970
1971 for (chan = 0; chan < 4; chan++) {
1972 int i;
1973
1974 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1975 (chan * 0x2000) | 0x0200);
1976 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1977 for (i = 0; i < 6; i++)
1978 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1979 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1980 if (tg3_wait_macro_done(tp))
1981 return -EBUSY;
1982 }
1983
1984 return 0;
1985}
1986
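/* Chip-specific PHY reset for 5703/5704/5705: reset the PHY, force a
 * 1000-full master-mode link, then write and verify the DSP test
 * patterns, retrying the whole sequence up to 10 times.
 */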
1987static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1988{
1989 u32 reg32, phy9_orig;
1990 int retries, do_phy_reset, err;
1991
1992 retries = 10;
1993 do_phy_reset = 1;
1994 do {
1995 if (do_phy_reset) {
1996 err = tg3_bmcr_reset(tp);
1997 if (err)
1998 return err;
1999 do_phy_reset = 0;
2000 }
2001
2002 /* Disable transmitter and interrupt. */
2003 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2004 continue;
2005
2006 reg32 |= 0x3000;
2007 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2008
2009 /* Set full-duplex, 1000 Mbps. */
2010 tg3_writephy(tp, MII_BMCR,
2011 BMCR_FULLDPLX | BMCR_SPEED1000);
2012
2013 /* Set to master mode. */
2014 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2015 continue;
2016
2017 tg3_writephy(tp, MII_CTRL1000,
2018 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2019
2020 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2021 if (err)
2022 return err;
2023
2024 /* Block the PHY control access. */
2025 tg3_phydsp_write(tp, 0x8005, 0x0800);
2026
2027 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2028 if (!err)
2029 break;
2030 } while (--retries);
2031
2032 err = tg3_phy_reset_chanpat(tp);
2033 if (err)
2034 return err;
2035
2036 tg3_phydsp_write(tp, 0x8005, 0x0000);
2037
2038 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2039 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2040
2041 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2042
2043 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2044
2045 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2046 reg32 &= ~0x3000;
2047 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2048 } else if (!err)
2049 err = -EBUSY;
2050
2051 return err;
2052}
2053
2054/* This will reset the tigon3 PHY.  Callers invoke it when there is
2055 * no valid link or when a reset is explicitly forced.
2056 */
2057static int tg3_phy_reset(struct tg3 *tp)
2058{
2059 u32 val, cpmuctrl;
2060 int err;
2061
2062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2063 val = tr32(GRC_MISC_CFG);
2064 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2065 udelay(40);
2066 }
2067 err = tg3_readphy(tp, MII_BMSR, &val);
2068 err |= tg3_readphy(tp, MII_BMSR, &val);
2069 if (err != 0)
2070 return -EBUSY;
2071
2072 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2073 netif_carrier_off(tp->dev);
2074 tg3_link_report(tp);
2075 }
2076
2077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2078 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2079 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2080 err = tg3_phy_reset_5703_4_5(tp);
2081 if (err)
2082 return err;
2083 goto out;
2084 }
2085
2086 cpmuctrl = 0;
2087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2088 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2089 cpmuctrl = tr32(TG3_CPMU_CTRL);
2090 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2091 tw32(TG3_CPMU_CTRL,
2092 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2093 }
2094
2095 err = tg3_bmcr_reset(tp);
2096 if (err)
2097 return err;
2098
2099 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2100 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2101 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2102
2103 tw32(TG3_CPMU_CTRL, cpmuctrl);
2104 }
2105
2106 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2107 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2108 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2109 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2110 CPMU_LSPD_1000MB_MACCLK_12_5) {
2111 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2112 udelay(40);
2113 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2114 }
2115 }
2116
2117 if (tg3_flag(tp, 5717_PLUS) &&
2118 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2119 return 0;
2120
2121 tg3_phy_apply_otp(tp);
2122
2123 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2124 tg3_phy_toggle_apd(tp, true);
2125 else
2126 tg3_phy_toggle_apd(tp, false);
2127
2128out:
2129 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2130 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2131 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2132 tg3_phydsp_write(tp, 0x000a, 0x0323);
2133 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2134 }
2135
2136 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2137 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2138 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2139 }
2140
2141 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2142 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2143 tg3_phydsp_write(tp, 0x000a, 0x310b);
2144 tg3_phydsp_write(tp, 0x201f, 0x9506);
2145 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2146 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2147 }
2148 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2149 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2150 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2151 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2152 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2153 tg3_writephy(tp, MII_TG3_TEST1,
2154 MII_TG3_TEST1_TRIM_EN | 0x4);
2155 } else
2156 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2157
2158 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2159 }
2160 }
2161
2162 /* Set Extended packet length bit (bit 14) on all chips that
2163 * support jumbo frames */
2164 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4 2165 /* Cannot do read-modify-write on 5401 */
2166 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2167 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
1da177e4 2168 /* Set bit 14 with read-modify-write to preserve other bits */
2169 err = tg3_phy_auxctl_read(tp,
2170 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2171 if (!err)
2172 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2173 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2174 }
2175
2176 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2177 * jumbo frames transmission.
2178 */
2179 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2180 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2181 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2182 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2183 }
2184
2185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2186 /* adjust output voltage */
2187 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2188 }
2189
2190 tg3_phy_toggle_automdix(tp, 1);
2191 tg3_phy_set_wirespeed(tp);
2192 return 0;
2193}
2194
2195#define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2196#define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2197#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2198 TG3_GPIO_MSG_NEED_VAUX)
2199#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2200 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2201 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2202 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2203 (TG3_GPIO_MSG_DRVR_PRES << 12))
2204
2205#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2206 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2207 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2208 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2209 (TG3_GPIO_MSG_NEED_VAUX << 12))
2210
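/* Each PCI function owns one 4-bit nibble of the shared GPIO message
 * word: function 0 uses bits 0-3, function 1 bits 4-7, and so on.
 * For example, a status of 0x0012 means function 0 needs Vaux while
 * function 1 reports only driver presence.
 */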
2211static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2212{
2213 u32 status, shift;
2214
2215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2217 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2218 else
2219 status = tr32(TG3_CPMU_DRV_STATUS);
2220
2221 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2222 status &= ~(TG3_GPIO_MSG_MASK << shift);
2223 status |= (newstat << shift);
2224
2225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2227 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2228 else
2229 tw32(TG3_CPMU_DRV_STATUS, status);
2230
2231 return status >> TG3_APE_GPIO_MSG_SHIFT;
2232}
2233
2234static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2235{
2236 if (!tg3_flag(tp, IS_NIC))
2237 return 0;
2238
2239 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2240 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2242 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2243 return -EIO;
2244
2245 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2246
2247 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2248 TG3_GRC_LCLCTL_PWRSW_DELAY);
2249
2250 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2251 } else {
2252 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2253 TG3_GRC_LCLCTL_PWRSW_DELAY);
2254 }
2255
2256 return 0;
2257}
2258
2259static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2260{
2261 u32 grc_local_ctrl;
2262
2263 if (!tg3_flag(tp, IS_NIC) ||
2264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2265 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2266 return;
2267
2268 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2269
2270 tw32_wait_f(GRC_LOCAL_CTRL,
2271 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2272 TG3_GRC_LCLCTL_PWRSW_DELAY);
2273
2274 tw32_wait_f(GRC_LOCAL_CTRL,
2275 grc_local_ctrl,
2276 TG3_GRC_LCLCTL_PWRSW_DELAY);
2277
2278 tw32_wait_f(GRC_LOCAL_CTRL,
2279 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2280 TG3_GRC_LCLCTL_PWRSW_DELAY);
2281}
2282
2283static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2284{
2285 if (!tg3_flag(tp, IS_NIC))
2286 return;
2287
2288 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2290 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2291 (GRC_LCLCTRL_GPIO_OE0 |
2292 GRC_LCLCTRL_GPIO_OE1 |
2293 GRC_LCLCTRL_GPIO_OE2 |
2294 GRC_LCLCTRL_GPIO_OUTPUT0 |
2295 GRC_LCLCTRL_GPIO_OUTPUT1),
2296 TG3_GRC_LCLCTL_PWRSW_DELAY);
2297 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2298 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2299 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2300 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2301 GRC_LCLCTRL_GPIO_OE1 |
2302 GRC_LCLCTRL_GPIO_OE2 |
2303 GRC_LCLCTRL_GPIO_OUTPUT0 |
2304 GRC_LCLCTRL_GPIO_OUTPUT1 |
2305 tp->grc_local_ctrl;
2306 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2307 TG3_GRC_LCLCTL_PWRSW_DELAY);
2308
2309 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2310 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2311 TG3_GRC_LCLCTL_PWRSW_DELAY);
2312
2313 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2314 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2315 TG3_GRC_LCLCTL_PWRSW_DELAY);
2316 } else {
2317 u32 no_gpio2;
2318 u32 grc_local_ctrl = 0;
2319
2320 /* Workaround to prevent overdrawing Amps. */
2321 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2322 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2323 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2324 grc_local_ctrl,
2325 TG3_GRC_LCLCTL_PWRSW_DELAY);
2326 }
2327
2328 /* On 5753 and variants, GPIO2 cannot be used. */
2329 no_gpio2 = tp->nic_sram_data_cfg &
2330 NIC_SRAM_DATA_CFG_NO_GPIO2;
2331
2332 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2333 GRC_LCLCTRL_GPIO_OE1 |
2334 GRC_LCLCTRL_GPIO_OE2 |
2335 GRC_LCLCTRL_GPIO_OUTPUT1 |
2336 GRC_LCLCTRL_GPIO_OUTPUT2;
2337 if (no_gpio2) {
2338 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2339 GRC_LCLCTRL_GPIO_OUTPUT2);
2340 }
2341 tw32_wait_f(GRC_LOCAL_CTRL,
2342 tp->grc_local_ctrl | grc_local_ctrl,
2343 TG3_GRC_LCLCTL_PWRSW_DELAY);
2344
2345 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2346
2347 tw32_wait_f(GRC_LOCAL_CTRL,
2348 tp->grc_local_ctrl | grc_local_ctrl,
2349 TG3_GRC_LCLCTL_PWRSW_DELAY);
2350
2351 if (!no_gpio2) {
2352 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2353 tw32_wait_f(GRC_LOCAL_CTRL,
2354 tp->grc_local_ctrl | grc_local_ctrl,
2355 TG3_GRC_LCLCTL_PWRSW_DELAY);
2356 }
2357 }
2358}
2359
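/* 5717-class power-source arbitration: publish this function's Vaux
 * vote in its own status nibble, and only touch the power rails if no
 * sibling function still reports a driver present.
 */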
2360static void tg3_frob_aux_power_5717(struct tg3 *tp)
2361{
2362 u32 msg = 0;
2363
2364 /* Serialize power state transitions */
2365 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2366 return;
2367
2368 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) ||
2369 tg3_flag(tp, WOL_ENABLE))
2370 msg = TG3_GPIO_MSG_NEED_VAUX;
2371
2372 msg = tg3_set_function_status(tp, msg);
2373
2374 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2375 goto done;
2376
2377 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2378 tg3_pwrsrc_switch_to_vaux(tp);
2379 else
2380 tg3_pwrsrc_die_with_vmain(tp);
2381
2382done:
2383 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2384}
2385
2386static void tg3_frob_aux_power(struct tg3 *tp)
2387{
2388 bool need_vaux = false;
2389
2390 /* The GPIOs do something completely different on 57765. */
2391 if (!tg3_flag(tp, IS_NIC) ||
2392 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2393 return;
2394
2395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2396 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2397 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2398 tg3_frob_aux_power_5717(tp);
2399 return;
2400 }
2401
2402 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2403 struct net_device *dev_peer;
2404
2405 dev_peer = pci_get_drvdata(tp->pdev_peer);
2406
2407 /* remove_one() may have been run on the peer. */
2408 if (dev_peer) {
2409 struct tg3 *tp_peer = netdev_priv(dev_peer);
2410
2411 if (tg3_flag(tp_peer, INIT_COMPLETE))
2412 return;
2413
2414 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2415 tg3_flag(tp_peer, ENABLE_ASF))
2416 need_vaux = true;
2417 }
2418 }
2419
2420 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2421 need_vaux = true;
2422
2423 if (need_vaux)
2424 tg3_pwrsrc_switch_to_vaux(tp);
2425 else
2426 tg3_pwrsrc_die_with_vmain(tp);
2427}
2428
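/* Decide whether MAC_MODE_LINK_POLARITY should be set on 5700 parts:
 * PHY_2 LED mode always wants it, a BCM5411 wants it at every speed
 * except 10 Mb/s, and everything else wants it only at 10 Mb/s.
 */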
2429static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2430{
2431 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2432 return 1;
2433 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2434 if (speed != SPEED_10)
2435 return 1;
2436 } else if (speed == SPEED_10)
2437 return 1;
2438
2439 return 0;
2440}
2441
2442static int tg3_setup_phy(struct tg3 *, int);
2443
2444#define RESET_KIND_SHUTDOWN 0
2445#define RESET_KIND_INIT 1
2446#define RESET_KIND_SUSPEND 2
2447
2448static void tg3_write_sig_post_reset(struct tg3 *, int);
2449static int tg3_halt_cpu(struct tg3 *, u32);
2450
2451 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2452 {
2453 u32 val;
2454
2455 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2457 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2458 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2459
2460 sg_dig_ctrl |=
2461 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2462 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2463 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2464 }
2465 return;
2466 }
2467
2468 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2469 tg3_bmcr_reset(tp);
2470 val = tr32(GRC_MISC_CFG);
2471 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2472 udelay(40);
2473 return;
2474 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2475 u32 phytest;
2476 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2477 u32 phy;
2478
2479 tg3_writephy(tp, MII_ADVERTISE, 0);
2480 tg3_writephy(tp, MII_BMCR,
2481 BMCR_ANENABLE | BMCR_ANRESTART);
2482
2483 tg3_writephy(tp, MII_TG3_FET_TEST,
2484 phytest | MII_TG3_FET_SHADOW_EN);
2485 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2486 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2487 tg3_writephy(tp,
2488 MII_TG3_FET_SHDW_AUXMODE4,
2489 phy);
2490 }
2491 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2492 }
2493 return;
2494 } else if (do_low_power) {
2495 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2496 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2497
2498 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2499 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2500 MII_TG3_AUXCTL_PCTL_VREG_11V;
2501 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2502 }
2503
2504 /* The PHY should not be powered down on some chips because
2505 * of bugs.
2506 */
2507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2509 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2510 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2511 return;
2512
2513 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2514 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2515 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2516 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2517 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2518 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2519 }
2520
2521 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2522}
2523
2524/* tp->lock is held. */
2525static int tg3_nvram_lock(struct tg3 *tp)
2526{
2527 if (tg3_flag(tp, NVRAM)) {
2528 int i;
2529
2530 if (tp->nvram_lock_cnt == 0) {
2531 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2532 for (i = 0; i < 8000; i++) {
2533 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2534 break;
2535 udelay(20);
2536 }
2537 if (i == 8000) {
2538 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2539 return -ENODEV;
2540 }
2541 }
2542 tp->nvram_lock_cnt++;
2543 }
2544 return 0;
2545}
2546
2547/* tp->lock is held. */
2548static void tg3_nvram_unlock(struct tg3 *tp)
2549{
2550 if (tg3_flag(tp, NVRAM)) {
2551 if (tp->nvram_lock_cnt > 0)
2552 tp->nvram_lock_cnt--;
2553 if (tp->nvram_lock_cnt == 0)
2554 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2555 }
2556}
2557
2558/* tp->lock is held. */
2559static void tg3_enable_nvram_access(struct tg3 *tp)
2560{
2561 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2562 u32 nvaccess = tr32(NVRAM_ACCESS);
2563
2564 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2565 }
2566}
2567
2568/* tp->lock is held. */
2569static void tg3_disable_nvram_access(struct tg3 *tp)
2570{
2571 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2572 u32 nvaccess = tr32(NVRAM_ACCESS);
2573
2574 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2575 }
2576}
2577
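/* Fallback read path for boards without the NVRAM interface: drive
 * the legacy serial EEPROM through GRC_EEPROM_ADDR and poll for
 * EEPROM_ADDR_COMPLETE, sleeping 1 ms per iteration for up to ~1 s.
 */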
2578static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2579 u32 offset, u32 *val)
2580{
2581 u32 tmp;
2582 int i;
2583
2584 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2585 return -EINVAL;
2586
2587 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2588 EEPROM_ADDR_DEVID_MASK |
2589 EEPROM_ADDR_READ);
2590 tw32(GRC_EEPROM_ADDR,
2591 tmp |
2592 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2593 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2594 EEPROM_ADDR_ADDR_MASK) |
2595 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2596
2597 for (i = 0; i < 1000; i++) {
2598 tmp = tr32(GRC_EEPROM_ADDR);
2599
2600 if (tmp & EEPROM_ADDR_COMPLETE)
2601 break;
2602 msleep(1);
2603 }
2604 if (!(tmp & EEPROM_ADDR_COMPLETE))
2605 return -EBUSY;
2606
2607 tmp = tr32(GRC_EEPROM_DATA);
2608
2609 /*
2610 * The data will always be opposite the native endian
2611 * format. Perform a blind byteswap to compensate.
2612 */
2613 *val = swab32(tmp);
2614
2615 return 0;
2616}
2617
2618#define NVRAM_CMD_TIMEOUT 10000
2619
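/* Issue one NVRAM command and busy-wait for NVRAM_CMD_DONE.  Worst
 * case is NVRAM_CMD_TIMEOUT polls at 10 us each, i.e. about 100 ms.
 */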
2620static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2621{
2622 int i;
2623
2624 tw32(NVRAM_CMD, nvram_cmd);
2625 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2626 udelay(10);
2627 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2628 udelay(10);
2629 break;
2630 }
2631 }
2632
2633 if (i == NVRAM_CMD_TIMEOUT)
2634 return -EBUSY;
2635
2636 return 0;
2637}
2638
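/* Atmel AT45DB0x1B flash is not byte-linear: each page starts on a
 * (1 << ATMEL_AT45DB0X1B_PAGE_POS)-byte boundary but holds only
 * tp->nvram_pagesize bytes.  As an illustration, assuming 264-byte
 * pages behind a 512-byte boundary (shift of 9), linear offset 1000
 * (page 3, offset 208) maps to (3 << 9) + 208.
 * tg3_nvram_logical_addr() below is the inverse mapping.
 */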
2639static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2640{
2641 if (tg3_flag(tp, NVRAM) &&
2642 tg3_flag(tp, NVRAM_BUFFERED) &&
2643 tg3_flag(tp, FLASH) &&
2644 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2645 (tp->nvram_jedecnum == JEDEC_ATMEL))
2646
2647 addr = ((addr / tp->nvram_pagesize) <<
2648 ATMEL_AT45DB0X1B_PAGE_POS) +
2649 (addr % tp->nvram_pagesize);
2650
2651 return addr;
2652}
2653
2654static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2655{
2656 if (tg3_flag(tp, NVRAM) &&
2657 tg3_flag(tp, NVRAM_BUFFERED) &&
2658 tg3_flag(tp, FLASH) &&
2659 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2660 (tp->nvram_jedecnum == JEDEC_ATMEL))
2661
2662 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2663 tp->nvram_pagesize) +
2664 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2665
2666 return addr;
2667}
2668
2669/* NOTE: Data read in from NVRAM is byteswapped according to
2670 * the byteswapping settings for all other register accesses.
2671 * tg3 devices are BE devices, so on a BE machine, the data
2672 * returned will be exactly as it is seen in NVRAM. On a LE
2673 * machine, the 32-bit value will be byteswapped.
2674 */
2675static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2676{
2677 int ret;
2678
2679 if (!tg3_flag(tp, NVRAM))
2680 return tg3_nvram_read_using_eeprom(tp, offset, val);
2681
2682 offset = tg3_nvram_phys_addr(tp, offset);
2683
2684 if (offset > NVRAM_ADDR_MSK)
2685 return -EINVAL;
2686
2687 ret = tg3_nvram_lock(tp);
2688 if (ret)
2689 return ret;
2690
2691 tg3_enable_nvram_access(tp);
2692
2693 tw32(NVRAM_ADDR, offset);
2694 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2695 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2696
2697 if (ret == 0)
2698 *val = tr32(NVRAM_RDDATA);
2699
2700 tg3_disable_nvram_access(tp);
2701
2702 tg3_nvram_unlock(tp);
2703
2704 return ret;
2705}
2706
2707/* Ensures NVRAM data is in bytestream format. */
2708static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2709{
2710 u32 v;
2711 int res = tg3_nvram_read(tp, offset, &v);
2712 if (!res)
2713 *val = cpu_to_be32(v);
2714 return res;
2715}
2716
2717/* tp->lock is held. */
2718static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2719{
2720 u32 addr_high, addr_low;
2721 int i;
2722
2723 addr_high = ((tp->dev->dev_addr[0] << 8) |
2724 tp->dev->dev_addr[1]);
2725 addr_low = ((tp->dev->dev_addr[2] << 24) |
2726 (tp->dev->dev_addr[3] << 16) |
2727 (tp->dev->dev_addr[4] << 8) |
2728 (tp->dev->dev_addr[5] << 0));
2729 for (i = 0; i < 4; i++) {
2730 if (i == 1 && skip_mac_1)
2731 continue;
2732 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2733 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2734 }
2735
2736 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2737 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2738 for (i = 0; i < 12; i++) {
2739 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2740 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2741 }
2742 }
2743
2744 addr_high = (tp->dev->dev_addr[0] +
2745 tp->dev->dev_addr[1] +
2746 tp->dev->dev_addr[2] +
2747 tp->dev->dev_addr[3] +
2748 tp->dev->dev_addr[4] +
2749 tp->dev->dev_addr[5]) &
2750 TX_BACKOFF_SEED_MASK;
2751 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2752}
2753
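/* Note: the MAC_TX_BACKOFF_SEED written above is derived from the sum
 * of the MAC address bytes, so two NICs on the same segment are
 * unlikely to seed identical transmit backoff sequences.
 */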
2754 static void tg3_enable_register_access(struct tg3 *tp)
2755 {
2756 /*
2757 * Make sure register accesses (indirect or otherwise) will function
2758 * correctly.
2759 */
2760 pci_write_config_dword(tp->pdev,
2761 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2762}
2763
2764static int tg3_power_up(struct tg3 *tp)
2765{
2766 int err;
2767
2768 tg3_enable_register_access(tp);
2769
2770 err = pci_set_power_state(tp->pdev, PCI_D0);
2771 if (!err) {
2772 /* Switch out of Vaux if it is a NIC */
2773 tg3_pwrsrc_switch_to_vmain(tp);
2774 } else {
2775 netdev_err(tp->dev, "Transition to D0 failed\n");
2776 }
2777
2778 return err;
2779 }
2780
2781static int tg3_power_down_prepare(struct tg3 *tp)
2782{
2783 u32 misc_host_ctrl;
2784 bool device_should_wake, do_low_power;
2785
2786 tg3_enable_register_access(tp);
5e7dfd0f
MC
2787
2788 /* Restore the CLKREQ setting. */
2789 if (tg3_flag(tp, CLKREQ_BUG)) {
2790 u16 lnkctl;
2791
2792 pci_read_config_word(tp->pdev,
2793 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2794 &lnkctl);
2795 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2796 pci_write_config_word(tp->pdev,
2797 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2798 lnkctl);
2799 }
2800
2801 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2802 tw32(TG3PCI_MISC_HOST_CTRL,
2803 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2804
2805 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2806 tg3_flag(tp, WOL_ENABLE);
2807
2808 if (tg3_flag(tp, USE_PHYLIB)) {
2809 do_low_power = false;
2810 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2811 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2812 struct phy_device *phydev;
2813 u32 phyid, advertising;
2814
2815 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2816
2817 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2818
2819 tp->link_config.orig_speed = phydev->speed;
2820 tp->link_config.orig_duplex = phydev->duplex;
2821 tp->link_config.orig_autoneg = phydev->autoneg;
2822 tp->link_config.orig_advertising = phydev->advertising;
2823
2824 advertising = ADVERTISED_TP |
2825 ADVERTISED_Pause |
2826 ADVERTISED_Autoneg |
2827 ADVERTISED_10baseT_Half;
2828
2829 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2830 if (tg3_flag(tp, WOL_SPEED_100MB))
2831 advertising |=
2832 ADVERTISED_100baseT_Half |
2833 ADVERTISED_100baseT_Full |
2834 ADVERTISED_10baseT_Full;
2835 else
2836 advertising |= ADVERTISED_10baseT_Full;
2837 }
2838
2839 phydev->advertising = advertising;
2840
2841 phy_start_aneg(phydev);
2842
2843 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2844 if (phyid != PHY_ID_BCMAC131) {
2845 phyid &= PHY_BCM_OUI_MASK;
2846 if (phyid == PHY_BCM_OUI_1 ||
2847 phyid == PHY_BCM_OUI_2 ||
2848 phyid == PHY_BCM_OUI_3)
2849 do_low_power = true;
2850 }
2851 }
2852 } else {
2853 do_low_power = true;
2854
2855 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2856 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2857 tp->link_config.orig_speed = tp->link_config.speed;
2858 tp->link_config.orig_duplex = tp->link_config.duplex;
2859 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2860 }
2861
2862 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2863 tp->link_config.speed = SPEED_10;
2864 tp->link_config.duplex = DUPLEX_HALF;
2865 tp->link_config.autoneg = AUTONEG_ENABLE;
2866 tg3_setup_phy(tp, 0);
2867 }
2868 }
2869
2870 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2871 u32 val;
2872
2873 val = tr32(GRC_VCPU_EXT_CTRL);
2874 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2875 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2876 int i;
2877 u32 val;
2878
2879 for (i = 0; i < 200; i++) {
2880 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2881 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2882 break;
2883 msleep(1);
2884 }
2885 }
2886 if (tg3_flag(tp, WOL_CAP))
2887 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2888 WOL_DRV_STATE_SHUTDOWN |
2889 WOL_DRV_WOL |
2890 WOL_SET_MAGIC_PKT);
2891
2892 if (device_should_wake) {
2893 u32 mac_mode;
2894
2895 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2896 if (do_low_power &&
2897 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2898 tg3_phy_auxctl_write(tp,
2899 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2900 MII_TG3_AUXCTL_PCTL_WOL_EN |
2901 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2902 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2903 udelay(40);
2904 }
2905
2906 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2907 mac_mode = MAC_MODE_PORT_MODE_GMII;
2908 else
2909 mac_mode = MAC_MODE_PORT_MODE_MII;
2910
2911 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2912 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2913 ASIC_REV_5700) {
2914 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2915 SPEED_100 : SPEED_10;
2916 if (tg3_5700_link_polarity(tp, speed))
2917 mac_mode |= MAC_MODE_LINK_POLARITY;
2918 else
2919 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2920 }
2921 } else {
2922 mac_mode = MAC_MODE_PORT_MODE_TBI;
2923 }
2924
2925 if (!tg3_flag(tp, 5750_PLUS))
2926 tw32(MAC_LED_CTRL, tp->led_ctrl);
2927
2928 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2929 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2930 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2931 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2932
2933 if (tg3_flag(tp, ENABLE_APE))
2934 mac_mode |= MAC_MODE_APE_TX_EN |
2935 MAC_MODE_APE_RX_EN |
2936 MAC_MODE_TDE_ENABLE;
2937
2938 tw32_f(MAC_MODE, mac_mode);
2939 udelay(100);
2940
2941 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2942 udelay(10);
2943 }
2944
2945 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2946 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2948 u32 base_val;
2949
2950 base_val = tp->pci_clock_ctrl;
2951 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2952 CLOCK_CTRL_TXCLK_DISABLE);
2953
2954 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2955 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2956 } else if (tg3_flag(tp, 5780_CLASS) ||
2957 tg3_flag(tp, CPMU_PRESENT) ||
2958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2959 /* do nothing */
2960 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2961 u32 newbits1, newbits2;
2962
2963 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2965 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2966 CLOCK_CTRL_TXCLK_DISABLE |
2967 CLOCK_CTRL_ALTCLK);
2968 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2969 } else if (tg3_flag(tp, 5705_PLUS)) {
2970 newbits1 = CLOCK_CTRL_625_CORE;
2971 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2972 } else {
2973 newbits1 = CLOCK_CTRL_ALTCLK;
2974 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2975 }
2976
2977 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2978 40);
2979
2980 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2981 40);
2982
2983 if (!tg3_flag(tp, 5705_PLUS)) {
2984 u32 newbits3;
2985
2986 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2987 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2988 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2989 CLOCK_CTRL_TXCLK_DISABLE |
2990 CLOCK_CTRL_44MHZ_CORE);
2991 } else {
2992 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2993 }
2994
2995 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2996 tp->pci_clock_ctrl | newbits3, 40);
2997 }
2998 }
2999
3000 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3001 tg3_power_down_phy(tp, do_low_power);
3002
3003 tg3_frob_aux_power(tp);
3004
3005 /* Workaround for unstable PLL clock */
3006 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3007 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3008 u32 val = tr32(0x7d00);
3009
3010 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3011 tw32(0x7d00, val);
3012 if (!tg3_flag(tp, ENABLE_ASF)) {
3013 int err;
3014
3015 err = tg3_nvram_lock(tp);
3016 tg3_halt_cpu(tp, RX_CPU_BASE);
3017 if (!err)
3018 tg3_nvram_unlock(tp);
3019 }
3020 }
3021
3022 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3023
3024 return 0;
3025}
3026
3027static void tg3_power_down(struct tg3 *tp)
3028{
3029 tg3_power_down_prepare(tp);
3030
3031 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3032 pci_set_power_state(tp->pdev, PCI_D3hot);
3033}
3034
3035static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3036{
3037 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3038 case MII_TG3_AUX_STAT_10HALF:
3039 *speed = SPEED_10;
3040 *duplex = DUPLEX_HALF;
3041 break;
3042
3043 case MII_TG3_AUX_STAT_10FULL:
3044 *speed = SPEED_10;
3045 *duplex = DUPLEX_FULL;
3046 break;
3047
3048 case MII_TG3_AUX_STAT_100HALF:
3049 *speed = SPEED_100;
3050 *duplex = DUPLEX_HALF;
3051 break;
3052
3053 case MII_TG3_AUX_STAT_100FULL:
3054 *speed = SPEED_100;
3055 *duplex = DUPLEX_FULL;
3056 break;
3057
3058 case MII_TG3_AUX_STAT_1000HALF:
3059 *speed = SPEED_1000;
3060 *duplex = DUPLEX_HALF;
3061 break;
3062
3063 case MII_TG3_AUX_STAT_1000FULL:
3064 *speed = SPEED_1000;
3065 *duplex = DUPLEX_FULL;
3066 break;
3067
3068 default:
3069 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3070 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3071 SPEED_10;
3072 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3073 DUPLEX_HALF;
3074 break;
3075 }
3076 *speed = SPEED_INVALID;
3077 *duplex = DUPLEX_INVALID;
3078 break;
3079 }
3080}
3081
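/* Translate ethtool ADVERTISED_* bits into MII advertisement
 * registers: ADVERTISE_* for 10/100, ADVERTISE_1000* via MII_CTRL1000
 * for gigabit, and, where the PHY is EEE-capable, MDIO_AN_EEE_ADV
 * through the clause-45 window.
 */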
3082 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3083 {
3084 int err = 0;
3085 u32 val, new_adv;
3086
3087 new_adv = ADVERTISE_CSMA;
3088 if (advertise & ADVERTISED_10baseT_Half)
3089 new_adv |= ADVERTISE_10HALF;
3090 if (advertise & ADVERTISED_10baseT_Full)
3091 new_adv |= ADVERTISE_10FULL;
3092 if (advertise & ADVERTISED_100baseT_Half)
3093 new_adv |= ADVERTISE_100HALF;
3094 if (advertise & ADVERTISED_100baseT_Full)
3095 new_adv |= ADVERTISE_100FULL;
3096
3097 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3098
3099 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3100 if (err)
3101 goto done;
3102
3103 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3104 goto done;
3105
3106 new_adv = 0;
3107 if (advertise & ADVERTISED_1000baseT_Half)
3108 new_adv |= ADVERTISE_1000HALF;
3109 if (advertise & ADVERTISED_1000baseT_Full)
3110 new_adv |= ADVERTISE_1000FULL;
3111
3112 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3113 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3114 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3115
3116 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3117 if (err)
3118 goto done;
3119
3120 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3121 goto done;
3122
3123 tw32(TG3_CPMU_EEE_MODE,
3124 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3125
3126 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3127 if (!err) {
3128 u32 err2;
3129
3130 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3131 case ASIC_REV_5717:
3132 case ASIC_REV_57765:
3133 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3134 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3135 MII_TG3_DSP_CH34TP2_HIBW01);
3136 /* Fall through */
3137 case ASIC_REV_5719:
3138 val = MII_TG3_DSP_TAP26_ALNOKO |
3139 MII_TG3_DSP_TAP26_RMRXSTO |
3140 MII_TG3_DSP_TAP26_OPCSINPT;
3141 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3142 }
3143
3144 val = 0;
3145 /* Advertise 100-BaseTX EEE ability */
3146 if (advertise & ADVERTISED_100baseT_Full)
3147 val |= MDIO_AN_EEE_ADV_100TX;
3148 /* Advertise 1000-BaseT EEE ability */
3149 if (advertise & ADVERTISED_1000baseT_Full)
3150 val |= MDIO_AN_EEE_ADV_1000T;
3151 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3152
3153 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3154 if (!err)
3155 err = err2;
3156 }
3157
3158done:
3159 return err;
3160}
3161
3162static void tg3_phy_copper_begin(struct tg3 *tp)
3163{
3164 u32 new_adv;
3165 int i;
3166
3167 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3168 new_adv = ADVERTISED_10baseT_Half |
3169 ADVERTISED_10baseT_Full;
3170 if (tg3_flag(tp, WOL_SPEED_100MB))
3171 new_adv |= ADVERTISED_100baseT_Half |
3172 ADVERTISED_100baseT_Full;
3173
3174 tg3_phy_autoneg_cfg(tp, new_adv,
3175 FLOW_CTRL_TX | FLOW_CTRL_RX);
3176 } else if (tp->link_config.speed == SPEED_INVALID) {
3177 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3178 tp->link_config.advertising &=
3179 ~(ADVERTISED_1000baseT_Half |
3180 ADVERTISED_1000baseT_Full);
3181
3182 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3183 tp->link_config.flowctrl);
3184 } else {
3185 /* Asking for a specific link mode. */
3186 if (tp->link_config.speed == SPEED_1000) {
3187 if (tp->link_config.duplex == DUPLEX_FULL)
3188 new_adv = ADVERTISED_1000baseT_Full;
3189 else
3190 new_adv = ADVERTISED_1000baseT_Half;
3191 } else if (tp->link_config.speed == SPEED_100) {
3192 if (tp->link_config.duplex == DUPLEX_FULL)
3193 new_adv = ADVERTISED_100baseT_Full;
3194 else
3195 new_adv = ADVERTISED_100baseT_Half;
3196 } else {
3197 if (tp->link_config.duplex == DUPLEX_FULL)
3198 new_adv = ADVERTISED_10baseT_Full;
3199 else
3200 new_adv = ADVERTISED_10baseT_Half;
3201 }
3202
3203 tg3_phy_autoneg_cfg(tp, new_adv,
3204 tp->link_config.flowctrl);
3205 }
3206
3207 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3208 tp->link_config.speed != SPEED_INVALID) {
3209 u32 bmcr, orig_bmcr;
3210
3211 tp->link_config.active_speed = tp->link_config.speed;
3212 tp->link_config.active_duplex = tp->link_config.duplex;
3213
3214 bmcr = 0;
3215 switch (tp->link_config.speed) {
3216 default:
3217 case SPEED_10:
3218 break;
3219
3220 case SPEED_100:
3221 bmcr |= BMCR_SPEED100;
3222 break;
3223
3224 case SPEED_1000:
3225 bmcr |= BMCR_SPEED1000;
3226 break;
3227 }
3228
3229 if (tp->link_config.duplex == DUPLEX_FULL)
3230 bmcr |= BMCR_FULLDPLX;
3231
3232 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3233 (bmcr != orig_bmcr)) {
3234 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3235 for (i = 0; i < 1500; i++) {
3236 u32 tmp;
3237
3238 udelay(10);
3239 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3240 tg3_readphy(tp, MII_BMSR, &tmp))
3241 continue;
3242 if (!(tmp & BMSR_LSTATUS)) {
3243 udelay(40);
3244 break;
3245 }
3246 }
3247 tg3_writephy(tp, MII_BMCR, bmcr);
3248 udelay(40);
3249 }
3250 } else {
3251 tg3_writephy(tp, MII_BMCR,
3252 BMCR_ANENABLE | BMCR_ANRESTART);
3253 }
3254}
3255
3256static int tg3_init_5401phy_dsp(struct tg3 *tp)
3257{
3258 int err;
3259
3260 /* Turn off tap power management. */
3261 /* Set Extended packet length bit */
3262 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3263
3264 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3265 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3266 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3267 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3268 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3269
3270 udelay(40);
3271
3272 return err;
3273}
3274
3275 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3276 {
3277 u32 adv_reg, all_mask = 0;
3278
3279 if (mask & ADVERTISED_10baseT_Half)
3280 all_mask |= ADVERTISE_10HALF;
3281 if (mask & ADVERTISED_10baseT_Full)
3282 all_mask |= ADVERTISE_10FULL;
3283 if (mask & ADVERTISED_100baseT_Half)
3284 all_mask |= ADVERTISE_100HALF;
3285 if (mask & ADVERTISED_100baseT_Full)
3286 all_mask |= ADVERTISE_100FULL;
3287
3288 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3289 return 0;
3290
3291 if ((adv_reg & all_mask) != all_mask)
3292 return 0;
3293 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3294 u32 tg3_ctrl;
3295
3296 all_mask = 0;
3297 if (mask & ADVERTISED_1000baseT_Half)
3298 all_mask |= ADVERTISE_1000HALF;
3299 if (mask & ADVERTISED_1000baseT_Full)
3300 all_mask |= ADVERTISE_1000FULL;
3301
3302 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3303 return 0;
3304
3305 if ((tg3_ctrl & all_mask) != all_mask)
3306 return 0;
3307 }
3308 return 1;
3309}
3310
3311static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3312{
3313 u32 curadv, reqadv;
3314
3315 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3316 return 1;
3317
3318 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3319 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3320
3321 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3322 if (curadv != reqadv)
3323 return 0;
3324
3325 if (tg3_flag(tp, PAUSE_AUTONEG))
3326 tg3_readphy(tp, MII_LPA, rmtadv);
3327 } else {
3328 /* Reprogram the advertisement register, even if it
3329 * does not affect the current link. If the link
3330 * gets renegotiated in the future, we can save an
3331 * additional renegotiation cycle by advertising
3332 * it correctly in the first place.
3333 */
3334 if (curadv != reqadv) {
3335 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3336 ADVERTISE_PAUSE_ASYM);
3337 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3338 }
3339 }
3340
3341 return 1;
3342}
3343
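/* Main copper link bring-up/poll path: clear pending PHY events,
 * apply per-chip workarounds, resolve speed/duplex from BMSR and
 * AUX_STAT, reprogram MAC_MODE, and report carrier changes.
 */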
3344static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3345{
3346 int current_link_up;
3347 u32 bmsr, val;
3348 u32 lcl_adv, rmt_adv;
3349 u16 current_speed;
3350 u8 current_duplex;
3351 int i, err;
3352
3353 tw32(MAC_EVENT, 0);
3354
3355 tw32_f(MAC_STATUS,
3356 (MAC_STATUS_SYNC_CHANGED |
3357 MAC_STATUS_CFG_CHANGED |
3358 MAC_STATUS_MI_COMPLETION |
3359 MAC_STATUS_LNKSTATE_CHANGED));
3360 udelay(40);
3361
3362 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3363 tw32_f(MAC_MI_MODE,
3364 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3365 udelay(80);
3366 }
3367
3368 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3369
3370 /* Some third-party PHYs need to be reset on link going
3371 * down.
3372 */
3373 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3374 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3375 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3376 netif_carrier_ok(tp->dev)) {
3377 tg3_readphy(tp, MII_BMSR, &bmsr);
3378 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3379 !(bmsr & BMSR_LSTATUS))
3380 force_reset = 1;
3381 }
3382 if (force_reset)
3383 tg3_phy_reset(tp);
3384
3385 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3386 tg3_readphy(tp, MII_BMSR, &bmsr);
3387 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3388 !tg3_flag(tp, INIT_COMPLETE))
3389 bmsr = 0;
3390
3391 if (!(bmsr & BMSR_LSTATUS)) {
3392 err = tg3_init_5401phy_dsp(tp);
3393 if (err)
3394 return err;
3395
3396 tg3_readphy(tp, MII_BMSR, &bmsr);
3397 for (i = 0; i < 1000; i++) {
3398 udelay(10);
3399 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3400 (bmsr & BMSR_LSTATUS)) {
3401 udelay(40);
3402 break;
3403 }
3404 }
3405
3406 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3407 TG3_PHY_REV_BCM5401_B0 &&
3408 !(bmsr & BMSR_LSTATUS) &&
3409 tp->link_config.active_speed == SPEED_1000) {
3410 err = tg3_phy_reset(tp);
3411 if (!err)
3412 err = tg3_init_5401phy_dsp(tp);
3413 if (err)
3414 return err;
3415 }
3416 }
3417 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3418 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3419 /* 5701 {A0,B0} CRC bug workaround */
3420 tg3_writephy(tp, 0x15, 0x0a75);
3421 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3422 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3423 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3424 }
3425
3426 /* Clear pending interrupts... */
3427 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3428 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3429
3430 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3431 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3432 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3433 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3434
3435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3436 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3437 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3438 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3439 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3440 else
3441 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3442 }
3443
3444 current_link_up = 0;
3445 current_speed = SPEED_INVALID;
3446 current_duplex = DUPLEX_INVALID;
3447
3448 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3449 err = tg3_phy_auxctl_read(tp,
3450 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3451 &val);
3452 if (!err && !(val & (1 << 10))) {
3453 tg3_phy_auxctl_write(tp,
3454 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3455 val | (1 << 10));
3456 goto relink;
3457 }
3458 }
3459
3460 bmsr = 0;
3461 for (i = 0; i < 100; i++) {
3462 tg3_readphy(tp, MII_BMSR, &bmsr);
3463 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3464 (bmsr & BMSR_LSTATUS))
3465 break;
3466 udelay(40);
3467 }
3468
3469 if (bmsr & BMSR_LSTATUS) {
3470 u32 aux_stat, bmcr;
3471
3472 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3473 for (i = 0; i < 2000; i++) {
3474 udelay(10);
3475 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3476 aux_stat)
3477 break;
3478 }
3479
3480 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3481 &current_speed,
3482 &current_duplex);
3483
3484 bmcr = 0;
3485 for (i = 0; i < 200; i++) {
3486 tg3_readphy(tp, MII_BMCR, &bmcr);
3487 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3488 continue;
3489 if (bmcr && bmcr != 0x7fff)
3490 break;
3491 udelay(10);
3492 }
3493
3494 lcl_adv = 0;
3495 rmt_adv = 0;
3496
3497 tp->link_config.active_speed = current_speed;
3498 tp->link_config.active_duplex = current_duplex;
3499
3500 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3501 if ((bmcr & BMCR_ANENABLE) &&
3502 tg3_copper_is_advertising_all(tp,
3503 tp->link_config.advertising)) {
3504 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3505 &rmt_adv))
3506 current_link_up = 1;
3507 }
3508 } else {
3509 if (!(bmcr & BMCR_ANENABLE) &&
3510 tp->link_config.speed == current_speed &&
3511 tp->link_config.duplex == current_duplex &&
3512 tp->link_config.flowctrl ==
3513 tp->link_config.active_flowctrl) {
3514 current_link_up = 1;
3515 }
3516 }
3517
3518 if (current_link_up == 1 &&
3519 tp->link_config.active_duplex == DUPLEX_FULL)
3520 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3521 }
3522
3523relink:
3524 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3525 tg3_phy_copper_begin(tp);
3526
3527 tg3_readphy(tp, MII_BMSR, &bmsr);
3528 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3529 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3530 current_link_up = 1;
3531 }
3532
3533 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3534 if (current_link_up == 1) {
3535 if (tp->link_config.active_speed == SPEED_100 ||
3536 tp->link_config.active_speed == SPEED_10)
3537 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3538 else
3539 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3540 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3541 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3542 else
1da177e4
LT
3543 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3544
3545 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3546 if (tp->link_config.active_duplex == DUPLEX_HALF)
3547 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3548
1da177e4 3549 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
e8f3f6ca
MC
3550 if (current_link_up == 1 &&
3551 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
1da177e4 3552 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
e8f3f6ca
MC
3553 else
3554 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1da177e4
LT
3555 }
3556
3557 /* ??? Without this setting Netgear GA302T PHY does not
3558 * ??? send/receive packets...
3559 */
79eb6904 3560 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
1da177e4
LT
3561 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3562 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3563 tw32_f(MAC_MI_MODE, tp->mi_mode);
3564 udelay(80);
3565 }
3566
3567 tw32_f(MAC_MODE, tp->mac_mode);
3568 udelay(40);
3569
52b02d04
MC
3570 tg3_phy_eee_adjust(tp, current_link_up);
3571
63c3a66f 3572 if (tg3_flag(tp, USE_LINKCHG_REG)) {
1da177e4
LT
3573 /* Polled via timer. */
3574 tw32_f(MAC_EVENT, 0);
3575 } else {
3576 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3577 }
3578 udelay(40);
3579
3580 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3581 current_link_up == 1 &&
3582 tp->link_config.active_speed == SPEED_1000 &&
63c3a66f 3583 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
1da177e4
LT
3584 udelay(120);
3585 tw32_f(MAC_STATUS,
3586 (MAC_STATUS_SYNC_CHANGED |
3587 MAC_STATUS_CFG_CHANGED));
3588 udelay(40);
3589 tg3_write_mem(tp,
3590 NIC_SRAM_FIRMWARE_MBOX,
3591 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3592 }
3593
5e7dfd0f 3594 /* Prevent send BD corruption. */
63c3a66f 3595 if (tg3_flag(tp, CLKREQ_BUG)) {
5e7dfd0f
MC
3596 u16 oldlnkctl, newlnkctl;
3597
3598 pci_read_config_word(tp->pdev,
708ebb3a 3599 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
5e7dfd0f
MC
3600 &oldlnkctl);
3601 if (tp->link_config.active_speed == SPEED_100 ||
3602 tp->link_config.active_speed == SPEED_10)
3603 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3604 else
3605 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3606 if (newlnkctl != oldlnkctl)
3607 pci_write_config_word(tp->pdev,
708ebb3a 3608 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
5e7dfd0f
MC
3609 newlnkctl);
3610 }
3611
1da177e4
LT
3612 if (current_link_up != netif_carrier_ok(tp->dev)) {
3613 if (current_link_up)
3614 netif_carrier_on(tp->dev);
3615 else
3616 netif_carrier_off(tp->dev);
3617 tg3_link_report(tp);
3618 }
3619
3620 return 0;
3621}
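
The back-to-back MII_BMSR reads in the link-poll loop above are deliberate: the link-status bit is latched-low per IEEE 802.3, so the first read returns (and clears) any link-down event recorded since the last read, and only the second read reflects the live state. A minimal userspace sketch of the pattern, with a mocked MDIO accessor standing in for tg3_readphy():

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MII_BMSR     0x01
#define BMSR_LSTATUS 0x0004 /* latched-low link status bit */

/* Mock MDIO accessor; returns 0 on success. It reports a stale
 * latched link-down on the first read and the live state on the
 * second, as a real latched-low bit would after a link bounce.
 */
static int mdio_read(int reg, uint16_t *val)
{
	static int reads;

	if (reg != MII_BMSR)
		return -1;
	*val = (reads++ == 0) ? 0x0000 : BMSR_LSTATUS;
	return 0;
}

static bool link_is_up_now(void)
{
	uint16_t bmsr = 0;

	mdio_read(MII_BMSR, &bmsr);     /* first read clears the latch */
	if (mdio_read(MII_BMSR, &bmsr)) /* second read is the live state */
		return false;
	return bmsr & BMSR_LSTATUS;
}

int main(void)
{
	printf("link up: %d\n", link_is_up_now()); /* prints 1 */
	return 0;
}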
3622
3623struct tg3_fiber_aneginfo {
3624 int state;
3625#define ANEG_STATE_UNKNOWN 0
3626#define ANEG_STATE_AN_ENABLE 1
3627#define ANEG_STATE_RESTART_INIT 2
3628#define ANEG_STATE_RESTART 3
3629#define ANEG_STATE_DISABLE_LINK_OK 4
3630#define ANEG_STATE_ABILITY_DETECT_INIT 5
3631#define ANEG_STATE_ABILITY_DETECT 6
3632#define ANEG_STATE_ACK_DETECT_INIT 7
3633#define ANEG_STATE_ACK_DETECT 8
3634#define ANEG_STATE_COMPLETE_ACK_INIT 9
3635#define ANEG_STATE_COMPLETE_ACK 10
3636#define ANEG_STATE_IDLE_DETECT_INIT 11
3637#define ANEG_STATE_IDLE_DETECT 12
3638#define ANEG_STATE_LINK_OK 13
3639#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3640#define ANEG_STATE_NEXT_PAGE_WAIT 15
3641
3642 u32 flags;
3643#define MR_AN_ENABLE 0x00000001
3644#define MR_RESTART_AN 0x00000002
3645#define MR_AN_COMPLETE 0x00000004
3646#define MR_PAGE_RX 0x00000008
3647#define MR_NP_LOADED 0x00000010
3648#define MR_TOGGLE_TX 0x00000020
3649#define MR_LP_ADV_FULL_DUPLEX 0x00000040
3650#define MR_LP_ADV_HALF_DUPLEX 0x00000080
3651#define MR_LP_ADV_SYM_PAUSE 0x00000100
3652#define MR_LP_ADV_ASYM_PAUSE 0x00000200
3653#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3654#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3655#define MR_LP_ADV_NEXT_PAGE 0x00001000
3656#define MR_TOGGLE_RX 0x00002000
3657#define MR_NP_RX 0x00004000
3658
3659#define MR_LINK_OK 0x80000000
3660
3661 unsigned long link_time, cur_time;
3662
3663 u32 ability_match_cfg;
3664 int ability_match_count;
3665
3666 char ability_match, idle_match, ack_match;
3667
3668 u32 txconfig, rxconfig;
3669#define ANEG_CFG_NP 0x00000080
3670#define ANEG_CFG_ACK 0x00000040
3671#define ANEG_CFG_RF2 0x00000020
3672#define ANEG_CFG_RF1 0x00000010
3673#define ANEG_CFG_PS2 0x00000001
3674#define ANEG_CFG_PS1 0x00008000
3675#define ANEG_CFG_HD 0x00004000
3676#define ANEG_CFG_FD 0x00002000
3677#define ANEG_CFG_INVAL 0x00001f06
3678
3679};
3680#define ANEG_OK 0
3681#define ANEG_DONE 1
3682#define ANEG_TIMER_ENAB 2
3683#define ANEG_FAILED -1
3684
3685#define ANEG_STATE_SETTLE_TIME 10000
3686
3687static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3688 struct tg3_fiber_aneginfo *ap)
3689{
5be73b47 3690 u16 flowctrl;
1da177e4
LT
3691 unsigned long delta;
3692 u32 rx_cfg_reg;
3693 int ret;
3694
3695 if (ap->state == ANEG_STATE_UNKNOWN) {
3696 ap->rxconfig = 0;
3697 ap->link_time = 0;
3698 ap->cur_time = 0;
3699 ap->ability_match_cfg = 0;
3700 ap->ability_match_count = 0;
3701 ap->ability_match = 0;
3702 ap->idle_match = 0;
3703 ap->ack_match = 0;
3704 }
3705 ap->cur_time++;
3706
3707 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3708 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3709
3710 if (rx_cfg_reg != ap->ability_match_cfg) {
3711 ap->ability_match_cfg = rx_cfg_reg;
3712 ap->ability_match = 0;
3713 ap->ability_match_count = 0;
3714 } else {
3715 if (++ap->ability_match_count > 1) {
3716 ap->ability_match = 1;
3717 ap->ability_match_cfg = rx_cfg_reg;
3718 }
3719 }
3720 if (rx_cfg_reg & ANEG_CFG_ACK)
3721 ap->ack_match = 1;
3722 else
3723 ap->ack_match = 0;
3724
3725 ap->idle_match = 0;
3726 } else {
3727 ap->idle_match = 1;
3728 ap->ability_match_cfg = 0;
3729 ap->ability_match_count = 0;
3730 ap->ability_match = 0;
3731 ap->ack_match = 0;
3732
3733 rx_cfg_reg = 0;
3734 }
3735
3736 ap->rxconfig = rx_cfg_reg;
3737 ret = ANEG_OK;
3738
33f401ae 3739 switch (ap->state) {
1da177e4
LT
3740 case ANEG_STATE_UNKNOWN:
3741 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3742 ap->state = ANEG_STATE_AN_ENABLE;
3743
3744 /* fallthru */
3745 case ANEG_STATE_AN_ENABLE:
3746 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3747 if (ap->flags & MR_AN_ENABLE) {
3748 ap->link_time = 0;
3749 ap->cur_time = 0;
3750 ap->ability_match_cfg = 0;
3751 ap->ability_match_count = 0;
3752 ap->ability_match = 0;
3753 ap->idle_match = 0;
3754 ap->ack_match = 0;
3755
3756 ap->state = ANEG_STATE_RESTART_INIT;
3757 } else {
3758 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3759 }
3760 break;
3761
3762 case ANEG_STATE_RESTART_INIT:
3763 ap->link_time = ap->cur_time;
3764 ap->flags &= ~(MR_NP_LOADED);
3765 ap->txconfig = 0;
3766 tw32(MAC_TX_AUTO_NEG, 0);
3767 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3768 tw32_f(MAC_MODE, tp->mac_mode);
3769 udelay(40);
3770
3771 ret = ANEG_TIMER_ENAB;
3772 ap->state = ANEG_STATE_RESTART;
3773
3774 /* fallthru */
3775 case ANEG_STATE_RESTART:
3776 delta = ap->cur_time - ap->link_time;
859a5887 3777 if (delta > ANEG_STATE_SETTLE_TIME)
1da177e4 3778 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
859a5887 3779 else
1da177e4 3780 ret = ANEG_TIMER_ENAB;
1da177e4
LT
3781 break;
3782
3783 case ANEG_STATE_DISABLE_LINK_OK:
3784 ret = ANEG_DONE;
3785 break;
3786
3787 case ANEG_STATE_ABILITY_DETECT_INIT:
3788 ap->flags &= ~(MR_TOGGLE_TX);
5be73b47
MC
3789 ap->txconfig = ANEG_CFG_FD;
3790 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3791 if (flowctrl & ADVERTISE_1000XPAUSE)
3792 ap->txconfig |= ANEG_CFG_PS1;
3793 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3794 ap->txconfig |= ANEG_CFG_PS2;
1da177e4
LT
3795 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3796 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3797 tw32_f(MAC_MODE, tp->mac_mode);
3798 udelay(40);
3799
3800 ap->state = ANEG_STATE_ABILITY_DETECT;
3801 break;
3802
3803 case ANEG_STATE_ABILITY_DETECT:
859a5887 3804 if (ap->ability_match != 0 && ap->rxconfig != 0)
1da177e4 3805 ap->state = ANEG_STATE_ACK_DETECT_INIT;
1da177e4
LT
3806 break;
3807
3808 case ANEG_STATE_ACK_DETECT_INIT:
3809 ap->txconfig |= ANEG_CFG_ACK;
3810 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3811 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3812 tw32_f(MAC_MODE, tp->mac_mode);
3813 udelay(40);
3814
3815 ap->state = ANEG_STATE_ACK_DETECT;
3816
3817 /* fallthru */
3818 case ANEG_STATE_ACK_DETECT:
3819 if (ap->ack_match != 0) {
3820 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3821 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3822 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3823 } else {
3824 ap->state = ANEG_STATE_AN_ENABLE;
3825 }
3826 } else if (ap->ability_match != 0 &&
3827 ap->rxconfig == 0) {
3828 ap->state = ANEG_STATE_AN_ENABLE;
3829 }
3830 break;
3831
3832 case ANEG_STATE_COMPLETE_ACK_INIT:
3833 if (ap->rxconfig & ANEG_CFG_INVAL) {
3834 ret = ANEG_FAILED;
3835 break;
3836 }
3837 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3838 MR_LP_ADV_HALF_DUPLEX |
3839 MR_LP_ADV_SYM_PAUSE |
3840 MR_LP_ADV_ASYM_PAUSE |
3841 MR_LP_ADV_REMOTE_FAULT1 |
3842 MR_LP_ADV_REMOTE_FAULT2 |
3843 MR_LP_ADV_NEXT_PAGE |
3844 MR_TOGGLE_RX |
3845 MR_NP_RX);
3846 if (ap->rxconfig & ANEG_CFG_FD)
3847 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3848 if (ap->rxconfig & ANEG_CFG_HD)
3849 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3850 if (ap->rxconfig & ANEG_CFG_PS1)
3851 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3852 if (ap->rxconfig & ANEG_CFG_PS2)
3853 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3854 if (ap->rxconfig & ANEG_CFG_RF1)
3855 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3856 if (ap->rxconfig & ANEG_CFG_RF2)
3857 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3858 if (ap->rxconfig & ANEG_CFG_NP)
3859 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3860
3861 ap->link_time = ap->cur_time;
3862
3863 ap->flags ^= (MR_TOGGLE_TX);
3864 if (ap->rxconfig & 0x0008)
3865 ap->flags |= MR_TOGGLE_RX;
3866 if (ap->rxconfig & ANEG_CFG_NP)
3867 ap->flags |= MR_NP_RX;
3868 ap->flags |= MR_PAGE_RX;
3869
3870 ap->state = ANEG_STATE_COMPLETE_ACK;
3871 ret = ANEG_TIMER_ENAB;
3872 break;
3873
3874 case ANEG_STATE_COMPLETE_ACK:
3875 if (ap->ability_match != 0 &&
3876 ap->rxconfig == 0) {
3877 ap->state = ANEG_STATE_AN_ENABLE;
3878 break;
3879 }
3880 delta = ap->cur_time - ap->link_time;
3881 if (delta > ANEG_STATE_SETTLE_TIME) {
3882 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3883 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3884 } else {
3885 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3886 !(ap->flags & MR_NP_RX)) {
3887 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3888 } else {
3889 ret = ANEG_FAILED;
3890 }
3891 }
3892 }
3893 break;
3894
3895 case ANEG_STATE_IDLE_DETECT_INIT:
3896 ap->link_time = ap->cur_time;
3897 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3898 tw32_f(MAC_MODE, tp->mac_mode);
3899 udelay(40);
3900
3901 ap->state = ANEG_STATE_IDLE_DETECT;
3902 ret = ANEG_TIMER_ENAB;
3903 break;
3904
3905 case ANEG_STATE_IDLE_DETECT:
3906 if (ap->ability_match != 0 &&
3907 ap->rxconfig == 0) {
3908 ap->state = ANEG_STATE_AN_ENABLE;
3909 break;
3910 }
3911 delta = ap->cur_time - ap->link_time;
3912 if (delta > ANEG_STATE_SETTLE_TIME) {
3913 /* XXX another gem from the Broadcom driver :( */
3914 ap->state = ANEG_STATE_LINK_OK;
3915 }
3916 break;
3917
3918 case ANEG_STATE_LINK_OK:
3919 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3920 ret = ANEG_DONE;
3921 break;
3922
3923 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3924 /* ??? unimplemented */
3925 break;
3926
3927 case ANEG_STATE_NEXT_PAGE_WAIT:
3928 /* ??? unimplemented */
3929 break;
3930
3931 default:
3932 ret = ANEG_FAILED;
3933 break;
855e1111 3934 }
1da177e4
LT
3935
3936 return ret;
3937}
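
The state machine above parses the received 1000Base-X configuration word held in ap->rxconfig using the ANEG_CFG_* bits defined alongside the struct. A standalone sketch decoding a partner's ability bits, reusing those definitions (note the unusual layout: PS1 sits at 0x8000 while PS2 sits at bit 0):

#include <stdint.h>
#include <stdio.h>

#define ANEG_CFG_FD  0x00002000 /* full duplex */
#define ANEG_CFG_HD  0x00004000 /* half duplex */
#define ANEG_CFG_PS1 0x00008000 /* symmetric pause */
#define ANEG_CFG_PS2 0x00000001 /* asymmetric pause */
#define ANEG_CFG_ACK 0x00000040 /* acknowledge */

/* Print the ability bits carried in a received config word. */
static void decode_rxconfig(uint32_t rxconfig)
{
	printf("FD:%d HD:%d PAUSE:%d ASYM:%d ACK:%d\n",
	       !!(rxconfig & ANEG_CFG_FD),
	       !!(rxconfig & ANEG_CFG_HD),
	       !!(rxconfig & ANEG_CFG_PS1),
	       !!(rxconfig & ANEG_CFG_PS2),
	       !!(rxconfig & ANEG_CFG_ACK));
}

int main(void)
{
	/* A partner advertising full duplex + symmetric pause, with ACK. */
	decode_rxconfig(ANEG_CFG_FD | ANEG_CFG_PS1 | ANEG_CFG_ACK);
	return 0;
}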
3938
5be73b47 3939static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
1da177e4
LT
3940{
3941 int res = 0;
3942 struct tg3_fiber_aneginfo aninfo;
3943 int status = ANEG_FAILED;
3944 unsigned int tick;
3945 u32 tmp;
3946
3947 tw32_f(MAC_TX_AUTO_NEG, 0);
3948
3949 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3950 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3951 udelay(40);
3952
3953 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3954 udelay(40);
3955
3956 memset(&aninfo, 0, sizeof(aninfo));
3957 aninfo.flags |= MR_AN_ENABLE;
3958 aninfo.state = ANEG_STATE_UNKNOWN;
3959 aninfo.cur_time = 0;
3960 tick = 0;
3961 while (++tick < 195000) {
3962 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3963 if (status == ANEG_DONE || status == ANEG_FAILED)
3964 break;
3965
3966 udelay(1);
3967 }
3968
3969 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3970 tw32_f(MAC_MODE, tp->mac_mode);
3971 udelay(40);
3972
5be73b47
MC
3973 *txflags = aninfo.txconfig;
3974 *rxflags = aninfo.flags;
1da177e4
LT
3975
3976 if (status == ANEG_DONE &&
3977 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3978 MR_LP_ADV_FULL_DUPLEX)))
3979 res = 1;
3980
3981 return res;
3982}
3983
3984static void tg3_init_bcm8002(struct tg3 *tp)
3985{
3986 u32 mac_status = tr32(MAC_STATUS);
3987 int i;
3988
 3989 /* Reset when initializing for the first time or when we already have a link. */
63c3a66f 3990 if (tg3_flag(tp, INIT_COMPLETE) &&
1da177e4
LT
3991 !(mac_status & MAC_STATUS_PCS_SYNCED))
3992 return;
3993
3994 /* Set PLL lock range. */
3995 tg3_writephy(tp, 0x16, 0x8007);
3996
3997 /* SW reset */
3998 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3999
4000 /* Wait for reset to complete. */
4001 /* XXX schedule_timeout() ... */
4002 for (i = 0; i < 500; i++)
4003 udelay(10);
4004
4005 /* Config mode; select PMA/Ch 1 regs. */
4006 tg3_writephy(tp, 0x10, 0x8411);
4007
4008 /* Enable auto-lock and comdet, select txclk for tx. */
4009 tg3_writephy(tp, 0x11, 0x0a10);
4010
4011 tg3_writephy(tp, 0x18, 0x00a0);
4012 tg3_writephy(tp, 0x16, 0x41ff);
4013
4014 /* Assert and deassert POR. */
4015 tg3_writephy(tp, 0x13, 0x0400);
4016 udelay(40);
4017 tg3_writephy(tp, 0x13, 0x0000);
4018
4019 tg3_writephy(tp, 0x11, 0x0a50);
4020 udelay(40);
4021 tg3_writephy(tp, 0x11, 0x0a10);
4022
4023 /* Wait for signal to stabilize */
4024 /* XXX schedule_timeout() ... */
4025 for (i = 0; i < 15000; i++)
4026 udelay(10);
4027
4028 /* Deselect the channel register so we can read the PHYID
4029 * later.
4030 */
4031 tg3_writephy(tp, 0x10, 0x8011);
4032}
4033
4034static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4035{
82cd3d11 4036 u16 flowctrl;
1da177e4
LT
4037 u32 sg_dig_ctrl, sg_dig_status;
4038 u32 serdes_cfg, expected_sg_dig_ctrl;
4039 int workaround, port_a;
4040 int current_link_up;
4041
4042 serdes_cfg = 0;
4043 expected_sg_dig_ctrl = 0;
4044 workaround = 0;
4045 port_a = 1;
4046 current_link_up = 0;
4047
4048 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4049 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4050 workaround = 1;
4051 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4052 port_a = 0;
4053
4054 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4055 /* preserve bits 20-23 for voltage regulator */
4056 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4057 }
4058
4059 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4060
4061 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
c98f6e3b 4062 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
1da177e4
LT
4063 if (workaround) {
4064 u32 val = serdes_cfg;
4065
4066 if (port_a)
4067 val |= 0xc010000;
4068 else
4069 val |= 0x4010000;
4070 tw32_f(MAC_SERDES_CFG, val);
4071 }
c98f6e3b
MC
4072
4073 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
1da177e4
LT
4074 }
4075 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4076 tg3_setup_flow_control(tp, 0, 0);
4077 current_link_up = 1;
4078 }
4079 goto out;
4080 }
4081
4082 /* Want auto-negotiation. */
c98f6e3b 4083 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
1da177e4 4084
82cd3d11
MC
4085 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4086 if (flowctrl & ADVERTISE_1000XPAUSE)
4087 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4088 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4089 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
1da177e4
LT
4090
4091 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
f07e9af3 4092 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3d3ebe74
MC
4093 tp->serdes_counter &&
4094 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4095 MAC_STATUS_RCVD_CFG)) ==
4096 MAC_STATUS_PCS_SYNCED)) {
4097 tp->serdes_counter--;
4098 current_link_up = 1;
4099 goto out;
4100 }
4101restart_autoneg:
1da177e4
LT
4102 if (workaround)
4103 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
c98f6e3b 4104 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
1da177e4
LT
4105 udelay(5);
4106 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4107
3d3ebe74 4108 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
f07e9af3 4109 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
1da177e4
LT
4110 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4111 MAC_STATUS_SIGNAL_DET)) {
3d3ebe74 4112 sg_dig_status = tr32(SG_DIG_STATUS);
1da177e4
LT
4113 mac_status = tr32(MAC_STATUS);
4114
c98f6e3b 4115 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
1da177e4 4116 (mac_status & MAC_STATUS_PCS_SYNCED)) {
82cd3d11
MC
4117 u32 local_adv = 0, remote_adv = 0;
4118
4119 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4120 local_adv |= ADVERTISE_1000XPAUSE;
4121 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4122 local_adv |= ADVERTISE_1000XPSE_ASYM;
1da177e4 4123
c98f6e3b 4124 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
82cd3d11 4125 remote_adv |= LPA_1000XPAUSE;
c98f6e3b 4126 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
82cd3d11 4127 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4
LT
4128
4129 tg3_setup_flow_control(tp, local_adv, remote_adv);
4130 current_link_up = 1;
3d3ebe74 4131 tp->serdes_counter = 0;
f07e9af3 4132 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
c98f6e3b 4133 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3d3ebe74
MC
4134 if (tp->serdes_counter)
4135 tp->serdes_counter--;
1da177e4
LT
4136 else {
4137 if (workaround) {
4138 u32 val = serdes_cfg;
4139
4140 if (port_a)
4141 val |= 0xc010000;
4142 else
4143 val |= 0x4010000;
4144
4145 tw32_f(MAC_SERDES_CFG, val);
4146 }
4147
c98f6e3b 4148 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
1da177e4
LT
4149 udelay(40);
4150
 4151 /* Link parallel detection - link is up
 4152 * only if we have PCS_SYNC and not
 4153 * receiving config code words. */
4154 mac_status = tr32(MAC_STATUS);
4155 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4156 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4157 tg3_setup_flow_control(tp, 0, 0);
4158 current_link_up = 1;
f07e9af3
MC
4159 tp->phy_flags |=
4160 TG3_PHYFLG_PARALLEL_DETECT;
3d3ebe74
MC
4161 tp->serdes_counter =
4162 SERDES_PARALLEL_DET_TIMEOUT;
4163 } else
4164 goto restart_autoneg;
1da177e4
LT
4165 }
4166 }
3d3ebe74
MC
4167 } else {
4168 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
f07e9af3 4169 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
1da177e4
LT
4170 }
4171
4172out:
4173 return current_link_up;
4174}
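
tg3_setup_fiber_hw_autoneg() advertises pause capability through the SG_DIG_PAUSE_CAP/SG_DIG_ASYM_PAUSE bits and later hands the local and partner advertisements to tg3_setup_flow_control(). A hedged sketch of the standard full-duplex pause resolution that path implements, patterned after the kernel's mii_resolve_flowctrl_fdx() helper; the FLOW_CTRL_*/ADV_* values here are illustrative, not the driver's:

#include <stdint.h>
#include <assert.h>

#define FLOW_CTRL_TX 0x1
#define FLOW_CTRL_RX 0x2

#define ADV_PAUSE_CAP  0x1 /* symmetric pause advertised */
#define ADV_PAUSE_ASYM 0x2 /* asymmetric pause advertised */

/* Resolve TX/RX pause for a full-duplex link from the local and
 * link-partner pause advertisements (IEEE 802.3 clause 37 rules).
 */
static uint8_t resolve_flowctrl_fdx(uint8_t lcladv, uint8_t rmtadv)
{
	uint8_t cap = 0;

	if (lcladv & ADV_PAUSE_CAP) {
		if (lcladv & ADV_PAUSE_ASYM) {
			if (rmtadv & ADV_PAUSE_CAP)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & ADV_PAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & ADV_PAUSE_CAP)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADV_PAUSE_ASYM) {
		if ((rmtadv & ADV_PAUSE_CAP) && (rmtadv & ADV_PAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}
	return cap;
}

int main(void)
{
	/* Both ends symmetric-pause capable: pause in both directions. */
	assert(resolve_flowctrl_fdx(ADV_PAUSE_CAP, ADV_PAUSE_CAP) ==
	       (FLOW_CTRL_TX | FLOW_CTRL_RX));
	/* We are asym-only, partner is sym+asym: we may send PAUSE only. */
	assert(resolve_flowctrl_fdx(ADV_PAUSE_ASYM,
				    ADV_PAUSE_CAP | ADV_PAUSE_ASYM) ==
	       FLOW_CTRL_TX);
	return 0;
}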
4175
4176static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4177{
4178 int current_link_up = 0;
4179
5cf64b8a 4180 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
1da177e4 4181 goto out;
1da177e4
LT
4182
4183 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5be73b47 4184 u32 txflags, rxflags;
1da177e4 4185 int i;
6aa20a22 4186
5be73b47
MC
4187 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4188 u32 local_adv = 0, remote_adv = 0;
1da177e4 4189
5be73b47
MC
4190 if (txflags & ANEG_CFG_PS1)
4191 local_adv |= ADVERTISE_1000XPAUSE;
4192 if (txflags & ANEG_CFG_PS2)
4193 local_adv |= ADVERTISE_1000XPSE_ASYM;
4194
4195 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4196 remote_adv |= LPA_1000XPAUSE;
4197 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4198 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4
LT
4199
4200 tg3_setup_flow_control(tp, local_adv, remote_adv);
4201
1da177e4
LT
4202 current_link_up = 1;
4203 }
4204 for (i = 0; i < 30; i++) {
4205 udelay(20);
4206 tw32_f(MAC_STATUS,
4207 (MAC_STATUS_SYNC_CHANGED |
4208 MAC_STATUS_CFG_CHANGED));
4209 udelay(40);
4210 if ((tr32(MAC_STATUS) &
4211 (MAC_STATUS_SYNC_CHANGED |
4212 MAC_STATUS_CFG_CHANGED)) == 0)
4213 break;
4214 }
4215
4216 mac_status = tr32(MAC_STATUS);
4217 if (current_link_up == 0 &&
4218 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4219 !(mac_status & MAC_STATUS_RCVD_CFG))
4220 current_link_up = 1;
4221 } else {
5be73b47
MC
4222 tg3_setup_flow_control(tp, 0, 0);
4223
1da177e4
LT
4224 /* Forcing 1000FD link up. */
4225 current_link_up = 1;
1da177e4
LT
4226
4227 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4228 udelay(40);
e8f3f6ca
MC
4229
4230 tw32_f(MAC_MODE, tp->mac_mode);
4231 udelay(40);
1da177e4
LT
4232 }
4233
4234out:
4235 return current_link_up;
4236}
4237
4238static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4239{
4240 u32 orig_pause_cfg;
4241 u16 orig_active_speed;
4242 u8 orig_active_duplex;
4243 u32 mac_status;
4244 int current_link_up;
4245 int i;
4246
8d018621 4247 orig_pause_cfg = tp->link_config.active_flowctrl;
1da177e4
LT
4248 orig_active_speed = tp->link_config.active_speed;
4249 orig_active_duplex = tp->link_config.active_duplex;
4250
63c3a66f 4251 if (!tg3_flag(tp, HW_AUTONEG) &&
1da177e4 4252 netif_carrier_ok(tp->dev) &&
63c3a66f 4253 tg3_flag(tp, INIT_COMPLETE)) {
1da177e4
LT
4254 mac_status = tr32(MAC_STATUS);
4255 mac_status &= (MAC_STATUS_PCS_SYNCED |
4256 MAC_STATUS_SIGNAL_DET |
4257 MAC_STATUS_CFG_CHANGED |
4258 MAC_STATUS_RCVD_CFG);
4259 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4260 MAC_STATUS_SIGNAL_DET)) {
4261 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4262 MAC_STATUS_CFG_CHANGED));
4263 return 0;
4264 }
4265 }
4266
4267 tw32_f(MAC_TX_AUTO_NEG, 0);
4268
4269 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4270 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4271 tw32_f(MAC_MODE, tp->mac_mode);
4272 udelay(40);
4273
79eb6904 4274 if (tp->phy_id == TG3_PHY_ID_BCM8002)
1da177e4
LT
4275 tg3_init_bcm8002(tp);
4276
4277 /* Enable link change event even when serdes polling. */
4278 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4279 udelay(40);
4280
4281 current_link_up = 0;
4282 mac_status = tr32(MAC_STATUS);
4283
63c3a66f 4284 if (tg3_flag(tp, HW_AUTONEG))
1da177e4
LT
4285 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4286 else
4287 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4288
898a56f8 4289 tp->napi[0].hw_status->status =
1da177e4 4290 (SD_STATUS_UPDATED |
898a56f8 4291 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
1da177e4
LT
4292
4293 for (i = 0; i < 100; i++) {
4294 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4295 MAC_STATUS_CFG_CHANGED));
4296 udelay(5);
4297 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3d3ebe74
MC
4298 MAC_STATUS_CFG_CHANGED |
4299 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
1da177e4
LT
4300 break;
4301 }
4302
4303 mac_status = tr32(MAC_STATUS);
4304 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4305 current_link_up = 0;
3d3ebe74
MC
4306 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4307 tp->serdes_counter == 0) {
1da177e4
LT
4308 tw32_f(MAC_MODE, (tp->mac_mode |
4309 MAC_MODE_SEND_CONFIGS));
4310 udelay(1);
4311 tw32_f(MAC_MODE, tp->mac_mode);
4312 }
4313 }
4314
4315 if (current_link_up == 1) {
4316 tp->link_config.active_speed = SPEED_1000;
4317 tp->link_config.active_duplex = DUPLEX_FULL;
4318 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4319 LED_CTRL_LNKLED_OVERRIDE |
4320 LED_CTRL_1000MBPS_ON));
4321 } else {
4322 tp->link_config.active_speed = SPEED_INVALID;
4323 tp->link_config.active_duplex = DUPLEX_INVALID;
4324 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4325 LED_CTRL_LNKLED_OVERRIDE |
4326 LED_CTRL_TRAFFIC_OVERRIDE));
4327 }
4328
4329 if (current_link_up != netif_carrier_ok(tp->dev)) {
4330 if (current_link_up)
4331 netif_carrier_on(tp->dev);
4332 else
4333 netif_carrier_off(tp->dev);
4334 tg3_link_report(tp);
4335 } else {
8d018621 4336 u32 now_pause_cfg = tp->link_config.active_flowctrl;
1da177e4
LT
4337 if (orig_pause_cfg != now_pause_cfg ||
4338 orig_active_speed != tp->link_config.active_speed ||
4339 orig_active_duplex != tp->link_config.active_duplex)
4340 tg3_link_report(tp);
4341 }
4342
4343 return 0;
4344}
4345
747e8f8b
MC
4346static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4347{
4348 int current_link_up, err = 0;
4349 u32 bmsr, bmcr;
4350 u16 current_speed;
4351 u8 current_duplex;
ef167e27 4352 u32 local_adv, remote_adv;
747e8f8b
MC
4353
4354 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4355 tw32_f(MAC_MODE, tp->mac_mode);
4356 udelay(40);
4357
4358 tw32(MAC_EVENT, 0);
4359
4360 tw32_f(MAC_STATUS,
4361 (MAC_STATUS_SYNC_CHANGED |
4362 MAC_STATUS_CFG_CHANGED |
4363 MAC_STATUS_MI_COMPLETION |
4364 MAC_STATUS_LNKSTATE_CHANGED));
4365 udelay(40);
4366
4367 if (force_reset)
4368 tg3_phy_reset(tp);
4369
4370 current_link_up = 0;
4371 current_speed = SPEED_INVALID;
4372 current_duplex = DUPLEX_INVALID;
4373
4374 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4375 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
4376 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4377 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4378 bmsr |= BMSR_LSTATUS;
4379 else
4380 bmsr &= ~BMSR_LSTATUS;
4381 }
747e8f8b
MC
4382
4383 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4384
4385 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
f07e9af3 4386 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
747e8f8b
MC
4387 /* do nothing, just check for link up at the end */
4388 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4389 u32 adv, new_adv;
4390
4391 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4392 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4393 ADVERTISE_1000XPAUSE |
4394 ADVERTISE_1000XPSE_ASYM |
4395 ADVERTISE_SLCT);
4396
ba4d07a8 4397 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
747e8f8b
MC
4398
4399 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4400 new_adv |= ADVERTISE_1000XHALF;
4401 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4402 new_adv |= ADVERTISE_1000XFULL;
4403
4404 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4405 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4406 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4407 tg3_writephy(tp, MII_BMCR, bmcr);
4408
4409 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3d3ebe74 4410 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
f07e9af3 4411 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
4412
4413 return err;
4414 }
4415 } else {
4416 u32 new_bmcr;
4417
4418 bmcr &= ~BMCR_SPEED1000;
4419 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4420
4421 if (tp->link_config.duplex == DUPLEX_FULL)
4422 new_bmcr |= BMCR_FULLDPLX;
4423
4424 if (new_bmcr != bmcr) {
4425 /* BMCR_SPEED1000 is a reserved bit that needs
4426 * to be set on write.
4427 */
4428 new_bmcr |= BMCR_SPEED1000;
4429
4430 /* Force a linkdown */
4431 if (netif_carrier_ok(tp->dev)) {
4432 u32 adv;
4433
4434 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4435 adv &= ~(ADVERTISE_1000XFULL |
4436 ADVERTISE_1000XHALF |
4437 ADVERTISE_SLCT);
4438 tg3_writephy(tp, MII_ADVERTISE, adv);
4439 tg3_writephy(tp, MII_BMCR, bmcr |
4440 BMCR_ANRESTART |
4441 BMCR_ANENABLE);
4442 udelay(10);
4443 netif_carrier_off(tp->dev);
4444 }
4445 tg3_writephy(tp, MII_BMCR, new_bmcr);
4446 bmcr = new_bmcr;
4447 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4448 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
4449 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4450 ASIC_REV_5714) {
4451 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4452 bmsr |= BMSR_LSTATUS;
4453 else
4454 bmsr &= ~BMSR_LSTATUS;
4455 }
f07e9af3 4456 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
4457 }
4458 }
4459
4460 if (bmsr & BMSR_LSTATUS) {
4461 current_speed = SPEED_1000;
4462 current_link_up = 1;
4463 if (bmcr & BMCR_FULLDPLX)
4464 current_duplex = DUPLEX_FULL;
4465 else
4466 current_duplex = DUPLEX_HALF;
4467
ef167e27
MC
4468 local_adv = 0;
4469 remote_adv = 0;
4470
747e8f8b 4471 if (bmcr & BMCR_ANENABLE) {
ef167e27 4472 u32 common;
747e8f8b
MC
4473
4474 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4475 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4476 common = local_adv & remote_adv;
4477 if (common & (ADVERTISE_1000XHALF |
4478 ADVERTISE_1000XFULL)) {
4479 if (common & ADVERTISE_1000XFULL)
4480 current_duplex = DUPLEX_FULL;
4481 else
4482 current_duplex = DUPLEX_HALF;
63c3a66f 4483 } else if (!tg3_flag(tp, 5780_CLASS)) {
57d8b880 4484 /* Link is up via parallel detect */
859a5887 4485 } else {
747e8f8b 4486 current_link_up = 0;
859a5887 4487 }
747e8f8b
MC
4488 }
4489 }
4490
ef167e27
MC
4491 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4492 tg3_setup_flow_control(tp, local_adv, remote_adv);
4493
747e8f8b
MC
4494 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4495 if (tp->link_config.active_duplex == DUPLEX_HALF)
4496 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4497
4498 tw32_f(MAC_MODE, tp->mac_mode);
4499 udelay(40);
4500
4501 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4502
4503 tp->link_config.active_speed = current_speed;
4504 tp->link_config.active_duplex = current_duplex;
4505
4506 if (current_link_up != netif_carrier_ok(tp->dev)) {
4507 if (current_link_up)
4508 netif_carrier_on(tp->dev);
4509 else {
4510 netif_carrier_off(tp->dev);
f07e9af3 4511 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
4512 }
4513 tg3_link_report(tp);
4514 }
4515 return err;
4516}
4517
4518static void tg3_serdes_parallel_detect(struct tg3 *tp)
4519{
3d3ebe74 4520 if (tp->serdes_counter) {
747e8f8b 4521 /* Give autoneg time to complete. */
3d3ebe74 4522 tp->serdes_counter--;
747e8f8b
MC
4523 return;
4524 }
c6cdf436 4525
747e8f8b
MC
4526 if (!netif_carrier_ok(tp->dev) &&
4527 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4528 u32 bmcr;
4529
4530 tg3_readphy(tp, MII_BMCR, &bmcr);
4531 if (bmcr & BMCR_ANENABLE) {
4532 u32 phy1, phy2;
4533
4534 /* Select shadow register 0x1f */
f08aa1a8
MC
4535 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4536 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
747e8f8b
MC
4537
4538 /* Select expansion interrupt status register */
f08aa1a8
MC
4539 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4540 MII_TG3_DSP_EXP1_INT_STAT);
4541 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4542 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
747e8f8b
MC
4543
4544 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4545 /* We have signal detect and not receiving
4546 * config code words, link is up by parallel
4547 * detection.
4548 */
4549
4550 bmcr &= ~BMCR_ANENABLE;
4551 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4552 tg3_writephy(tp, MII_BMCR, bmcr);
f07e9af3 4553 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
4554 }
4555 }
859a5887
MC
4556 } else if (netif_carrier_ok(tp->dev) &&
4557 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
f07e9af3 4558 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
747e8f8b
MC
4559 u32 phy2;
4560
4561 /* Select expansion interrupt status register */
f08aa1a8
MC
4562 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4563 MII_TG3_DSP_EXP1_INT_STAT);
4564 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
747e8f8b
MC
4565 if (phy2 & 0x20) {
4566 u32 bmcr;
4567
4568 /* Config code words received, turn on autoneg. */
4569 tg3_readphy(tp, MII_BMCR, &bmcr);
4570 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4571
f07e9af3 4572 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
4573
4574 }
4575 }
4576}
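
A compact restatement of the rule tg3_serdes_parallel_detect() applies, with hypothetical boolean inputs standing in for the shadow/expansion register reads above: a detected signal without incoming /C/ config code words means the partner is forced-speed, so autoneg is dropped and 1000FD forced; config words reappearing turn autoneg back on. (The real function also tracks this via TG3_PHYFLG_PARALLEL_DETECT; the BMCR bit serves as a simplified proxy here.)

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define BMCR_ANENABLE  0x1000
#define BMCR_SPEED1000 0x0040
#define BMCR_FULLDPLX  0x0100

static uint16_t parallel_detect_step(uint16_t bmcr, bool link,
				     bool signal, bool cfg_words)
{
	if (!link && (bmcr & BMCR_ANENABLE) && signal && !cfg_words) {
		/* Partner not autonegotiating: force 1000FD. */
		bmcr &= ~BMCR_ANENABLE;
		bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
	} else if (link && cfg_words && !(bmcr & BMCR_ANENABLE)) {
		/* Config code words received: re-enable autoneg. */
		bmcr |= BMCR_ANENABLE;
	}
	return bmcr;
}

int main(void)
{
	uint16_t bmcr = BMCR_ANENABLE;

	bmcr = parallel_detect_step(bmcr, false, true, false);
	assert(!(bmcr & BMCR_ANENABLE));
	assert(bmcr & (BMCR_SPEED1000 | BMCR_FULLDPLX));

	bmcr = parallel_detect_step(bmcr, true, true, true);
	assert(bmcr & BMCR_ANENABLE);
	return 0;
}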
4577
1da177e4
LT
4578static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4579{
f2096f94 4580 u32 val;
1da177e4
LT
4581 int err;
4582
f07e9af3 4583 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
1da177e4 4584 err = tg3_setup_fiber_phy(tp, force_reset);
f07e9af3 4585 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
747e8f8b 4586 err = tg3_setup_fiber_mii_phy(tp, force_reset);
859a5887 4587 else
1da177e4 4588 err = tg3_setup_copper_phy(tp, force_reset);
1da177e4 4589
bcb37f6c 4590 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
f2096f94 4591 u32 scale;
aa6c91fe
MC
4592
4593 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4594 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4595 scale = 65;
4596 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4597 scale = 6;
4598 else
4599 scale = 12;
4600
4601 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4602 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4603 tw32(GRC_MISC_CFG, val);
4604 }
4605
f2096f94
MC
4606 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4607 (6 << TX_LENGTHS_IPG_SHIFT);
4608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4609 val |= tr32(MAC_TX_LENGTHS) &
4610 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4611 TX_LENGTHS_CNT_DWN_VAL_MSK);
4612
1da177e4
LT
4613 if (tp->link_config.active_speed == SPEED_1000 &&
4614 tp->link_config.active_duplex == DUPLEX_HALF)
f2096f94
MC
4615 tw32(MAC_TX_LENGTHS, val |
4616 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
1da177e4 4617 else
f2096f94
MC
4618 tw32(MAC_TX_LENGTHS, val |
4619 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
1da177e4 4620
63c3a66f 4621 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
4622 if (netif_carrier_ok(tp->dev)) {
4623 tw32(HOSTCC_STAT_COAL_TICKS,
15f9850d 4624 tp->coal.stats_block_coalesce_usecs);
1da177e4
LT
4625 } else {
4626 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4627 }
4628 }
4629
63c3a66f 4630 if (tg3_flag(tp, ASPM_WORKAROUND)) {
f2096f94 4631 val = tr32(PCIE_PWR_MGMT_THRESH);
8ed5d97e
MC
4632 if (!netif_carrier_ok(tp->dev))
4633 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4634 tp->pwrmgmt_thresh;
4635 else
4636 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4637 tw32(PCIE_PWR_MGMT_THRESH, val);
4638 }
4639
1da177e4
LT
4640 return err;
4641}
4642
66cfd1bd
MC
4643static inline int tg3_irq_sync(struct tg3 *tp)
4644{
4645 return tp->irq_sync;
4646}
4647
97bd8e49
MC
4648static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4649{
4650 int i;
4651
4652 dst = (u32 *)((u8 *)dst + off);
4653 for (i = 0; i < len; i += sizeof(u32))
4654 *dst++ = tr32(off + i);
4655}
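
The pointer arithmetic at the top of tg3_rd32_loop() offsets the destination buffer by the register offset, so every register lands at the same byte offset in the dump as in register space: the register at offset off + i ends up in regs[(off + i) / 4]. A self-contained sketch against a fake register file:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t fake_regs[0x100 / 4]; /* pretend MMIO register space */

static uint32_t rd32(uint32_t off) { return fake_regs[off / 4]; }

/* Mirror of the copy loop above: advance dst by the register
 * offset so the dump buffer is laid out like register space.
 */
static void rd32_loop(uint32_t *dst, uint32_t off, uint32_t len)
{
	uint32_t i;

	dst = (uint32_t *)((uint8_t *)dst + off);
	for (i = 0; i < len; i += sizeof(uint32_t))
		*dst++ = rd32(off + i);
}

int main(void)
{
	uint32_t dump[0x100 / 4];

	fake_regs[0x48 / 4] = 0xdeadbeef;
	memset(dump, 0, sizeof(dump));
	rd32_loop(dump, 0x40, 0x10);
	printf("dump[0x48/4] = 0x%08x\n", dump[0x48 / 4]); /* 0xdeadbeef */
	return 0;
}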
4656
4657static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4658{
4659 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4660 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4661 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4662 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4663 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4664 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4665 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4666 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4667 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4668 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4669 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4670 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4671 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4672 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4673 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4674 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4675 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4676 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4677 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4678
63c3a66f 4679 if (tg3_flag(tp, SUPPORT_MSIX))
97bd8e49
MC
4680 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4681
4682 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4683 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4684 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4685 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4686 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4687 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4688 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4689 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4690
63c3a66f 4691 if (!tg3_flag(tp, 5705_PLUS)) {
97bd8e49
MC
4692 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4693 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4694 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4695 }
4696
4697 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4698 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4699 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4700 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4701 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4702
63c3a66f 4703 if (tg3_flag(tp, NVRAM))
97bd8e49
MC
4704 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4705}
4706
4707static void tg3_dump_state(struct tg3 *tp)
4708{
4709 int i;
4710 u32 *regs;
4711
4712 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4713 if (!regs) {
4714 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4715 return;
4716 }
4717
63c3a66f 4718 if (tg3_flag(tp, PCI_EXPRESS)) {
97bd8e49
MC
4719 /* Read up to but not including private PCI registers */
4720 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4721 regs[i / sizeof(u32)] = tr32(i);
4722 } else
4723 tg3_dump_legacy_regs(tp, regs);
4724
4725 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4726 if (!regs[i + 0] && !regs[i + 1] &&
4727 !regs[i + 2] && !regs[i + 3])
4728 continue;
4729
4730 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4731 i * 4,
4732 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4733 }
4734
4735 kfree(regs);
4736
4737 for (i = 0; i < tp->irq_cnt; i++) {
4738 struct tg3_napi *tnapi = &tp->napi[i];
4739
4740 /* SW status block */
4741 netdev_err(tp->dev,
4742 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4743 i,
4744 tnapi->hw_status->status,
4745 tnapi->hw_status->status_tag,
4746 tnapi->hw_status->rx_jumbo_consumer,
4747 tnapi->hw_status->rx_consumer,
4748 tnapi->hw_status->rx_mini_consumer,
4749 tnapi->hw_status->idx[0].rx_producer,
4750 tnapi->hw_status->idx[0].tx_consumer);
4751
4752 netdev_err(tp->dev,
4753 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4754 i,
4755 tnapi->last_tag, tnapi->last_irq_tag,
4756 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4757 tnapi->rx_rcb_ptr,
4758 tnapi->prodring.rx_std_prod_idx,
4759 tnapi->prodring.rx_std_cons_idx,
4760 tnapi->prodring.rx_jmb_prod_idx,
4761 tnapi->prodring.rx_jmb_cons_idx);
4762 }
4763}
4764
df3e6548
MC
4765/* This is called whenever we suspect that the system chipset is re-
4766 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4767 * is bogus tx completions. We try to recover by setting the
4768 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4769 * in the workqueue.
4770 */
4771static void tg3_tx_recover(struct tg3 *tp)
4772{
63c3a66f 4773 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
df3e6548
MC
4774 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4775
5129c3a3
MC
4776 netdev_warn(tp->dev,
4777 "The system may be re-ordering memory-mapped I/O "
4778 "cycles to the network device, attempting to recover. "
4779 "Please report the problem to the driver maintainer "
4780 "and include system chipset information.\n");
df3e6548
MC
4781
4782 spin_lock(&tp->lock);
63c3a66f 4783 tg3_flag_set(tp, TX_RECOVERY_PENDING);
df3e6548
MC
4784 spin_unlock(&tp->lock);
4785}
4786
f3f3f27e 4787static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
1b2a7205 4788{
f65aac16
MC
4789 /* Tell compiler to fetch tx indices from memory. */
4790 barrier();
f3f3f27e
MC
4791 return tnapi->tx_pending -
4792 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
1b2a7205
MC
4793}
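
The arithmetic in tg3_tx_avail() stays correct even after the 32-bit producer/consumer indices wrap, because unsigned subtraction is modulo 2^32 and the mask reduces the difference modulo the (power-of-two) ring size. A worked example:

#include <stdint.h>
#include <assert.h>

#define TX_RING_SIZE 512 /* must be a power of two */

/* Free descriptors = pending - in-flight, where in-flight is the
 * masked difference of the free-running producer/consumer indices.
 */
static uint32_t tx_avail(uint32_t pending, uint32_t prod, uint32_t cons)
{
	return pending - ((prod - cons) & (TX_RING_SIZE - 1));
}

int main(void)
{
	/* Simple case: producer is 10 entries ahead of the consumer. */
	assert(tx_avail(511, 110, 100) == 501);
	/* Wrapped case: prod has wrapped past 2^32, cons has not;
	 * 5 - 0xfffffffb == 10 (mod 2^32), so the result is identical.
	 */
	assert(tx_avail(511, 5, 0xfffffffbu) == 501);
	return 0;
}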
4794
1da177e4
LT
4795/* Tigon3 never reports partial packet sends. So we do not
4796 * need special logic to handle SKBs that have not had all
4797 * of their frags sent yet, like SunGEM does.
4798 */
17375d25 4799static void tg3_tx(struct tg3_napi *tnapi)
1da177e4 4800{
17375d25 4801 struct tg3 *tp = tnapi->tp;
898a56f8 4802 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
f3f3f27e 4803 u32 sw_idx = tnapi->tx_cons;
fe5f5787
MC
4804 struct netdev_queue *txq;
4805 int index = tnapi - tp->napi;
4806
63c3a66f 4807 if (tg3_flag(tp, ENABLE_TSS))
fe5f5787
MC
4808 index--;
4809
4810 txq = netdev_get_tx_queue(tp->dev, index);
1da177e4
LT
4811
4812 while (sw_idx != hw_idx) {
f4188d8a 4813 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
1da177e4 4814 struct sk_buff *skb = ri->skb;
df3e6548
MC
4815 int i, tx_bug = 0;
4816
4817 if (unlikely(skb == NULL)) {
4818 tg3_tx_recover(tp);
4819 return;
4820 }
1da177e4 4821
f4188d8a 4822 pci_unmap_single(tp->pdev,
4e5e4f0d 4823 dma_unmap_addr(ri, mapping),
f4188d8a
AD
4824 skb_headlen(skb),
4825 PCI_DMA_TODEVICE);
1da177e4
LT
4826
4827 ri->skb = NULL;
4828
4829 sw_idx = NEXT_TX(sw_idx);
4830
4831 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
f3f3f27e 4832 ri = &tnapi->tx_buffers[sw_idx];
df3e6548
MC
4833 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4834 tx_bug = 1;
f4188d8a
AD
4835
4836 pci_unmap_page(tp->pdev,
4e5e4f0d 4837 dma_unmap_addr(ri, mapping),
f4188d8a
AD
4838 skb_shinfo(skb)->frags[i].size,
4839 PCI_DMA_TODEVICE);
1da177e4
LT
4840 sw_idx = NEXT_TX(sw_idx);
4841 }
4842
f47c11ee 4843 dev_kfree_skb(skb);
df3e6548
MC
4844
4845 if (unlikely(tx_bug)) {
4846 tg3_tx_recover(tp);
4847 return;
4848 }
1da177e4
LT
4849 }
4850
f3f3f27e 4851 tnapi->tx_cons = sw_idx;
1da177e4 4852
1b2a7205
MC
4853 /* Need to make the tx_cons update visible to tg3_start_xmit()
4854 * before checking for netif_queue_stopped(). Without the
4855 * memory barrier, there is a small possibility that tg3_start_xmit()
4856 * will miss it and cause the queue to be stopped forever.
4857 */
4858 smp_mb();
4859
fe5f5787 4860 if (unlikely(netif_tx_queue_stopped(txq) &&
f3f3f27e 4861 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
fe5f5787
MC
4862 __netif_tx_lock(txq, smp_processor_id());
4863 if (netif_tx_queue_stopped(txq) &&
f3f3f27e 4864 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
fe5f5787
MC
4865 netif_tx_wake_queue(txq);
4866 __netif_tx_unlock(txq);
51b91468 4867 }
1da177e4
LT
4868}
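
The smp_mb() above pairs with a matching barrier on the tg3_start_xmit() side: the completion path publishes tx_cons before testing the stopped flag, while the transmit path stops the queue before re-checking free space, so at least one side observes the other's update and a wakeup cannot be lost. A userspace analogue of the protocol using C11 atomics, where the sequentially consistent operations supply the barriers; names and thresholds are illustrative:

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE   512
#define WAKE_THRESH (RING_SIZE / 4)

static atomic_uint tx_prod, tx_cons;
static atomic_bool queue_stopped;

static uint32_t avail(void)
{
	unsigned int p = atomic_load(&tx_prod);
	unsigned int c = atomic_load(&tx_cons);

	return RING_SIZE - 1 - ((p - c) & (RING_SIZE - 1));
}

/* Completion side: publish the new consumer index, then test the
 * stopped flag (analogous to tg3_tx() after its smp_mb()).
 */
static void completion_side(unsigned int new_cons)
{
	atomic_store(&tx_cons, new_cons);
	if (atomic_load(&queue_stopped) && avail() > WAKE_THRESH)
		atomic_store(&queue_stopped, 0); /* wake the queue */
}

/* Transmit side: stop first, then re-check space; together these
 * orderings prevent the queue staying stopped forever.
 */
static void xmit_side_out_of_space(void)
{
	atomic_store(&queue_stopped, 1);
	if (avail() > WAKE_THRESH)
		atomic_store(&queue_stopped, 0); /* self-wake */
}

int main(void)
{
	atomic_store(&tx_prod, 10);
	xmit_side_out_of_space(); /* stops, then self-wakes: space left */
	completion_side(10);      /* all tx reclaimed; queue stays awake */
	return (int)atomic_load(&queue_stopped); /* 0 */
}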
4869
2b2cdb65
MC
4870static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4871{
4872 if (!ri->skb)
4873 return;
4874
4e5e4f0d 4875 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
2b2cdb65
MC
4876 map_sz, PCI_DMA_FROMDEVICE);
4877 dev_kfree_skb_any(ri->skb);
4878 ri->skb = NULL;
4879}
4880
1da177e4
LT
4881/* Returns size of skb allocated or < 0 on error.
4882 *
4883 * We only need to fill in the address because the other members
4884 * of the RX descriptor are invariant, see tg3_init_rings.
4885 *
 4886 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4887 * posting buffers we only dirty the first cache line of the RX
4888 * descriptor (containing the address). Whereas for the RX status
4889 * buffers the cpu only reads the last cacheline of the RX descriptor
4890 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4891 */
86b21e59 4892static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
a3896167 4893 u32 opaque_key, u32 dest_idx_unmasked)
1da177e4
LT
4894{
4895 struct tg3_rx_buffer_desc *desc;
f94e290e 4896 struct ring_info *map;
1da177e4
LT
4897 struct sk_buff *skb;
4898 dma_addr_t mapping;
4899 int skb_size, dest_idx;
4900
1da177e4
LT
4901 switch (opaque_key) {
4902 case RXD_OPAQUE_RING_STD:
2c49a44d 4903 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
21f581a5
MC
4904 desc = &tpr->rx_std[dest_idx];
4905 map = &tpr->rx_std_buffers[dest_idx];
287be12e 4906 skb_size = tp->rx_pkt_map_sz;
1da177e4
LT
4907 break;
4908
4909 case RXD_OPAQUE_RING_JUMBO:
2c49a44d 4910 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
79ed5ac7 4911 desc = &tpr->rx_jmb[dest_idx].std;
21f581a5 4912 map = &tpr->rx_jmb_buffers[dest_idx];
287be12e 4913 skb_size = TG3_RX_JMB_MAP_SZ;
1da177e4
LT
4914 break;
4915
4916 default:
4917 return -EINVAL;
855e1111 4918 }
1da177e4
LT
4919
4920 /* Do not overwrite any of the map or rp information
4921 * until we are sure we can commit to a new buffer.
4922 *
4923 * Callers depend upon this behavior and assume that
4924 * we leave everything unchanged if we fail.
4925 */
287be12e 4926 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
1da177e4
LT
4927 if (skb == NULL)
4928 return -ENOMEM;
4929
1da177e4
LT
4930 skb_reserve(skb, tp->rx_offset);
4931
287be12e 4932 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
1da177e4 4933 PCI_DMA_FROMDEVICE);
a21771dd
MC
4934 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4935 dev_kfree_skb(skb);
4936 return -EIO;
4937 }
1da177e4
LT
4938
4939 map->skb = skb;
4e5e4f0d 4940 dma_unmap_addr_set(map, mapping, mapping);
1da177e4 4941
1da177e4
LT
4942 desc->addr_hi = ((u64)mapping >> 32);
4943 desc->addr_lo = ((u64)mapping & 0xffffffff);
4944
4945 return skb_size;
4946}
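
The last step of tg3_alloc_rx_skb() splits the 64-bit DMA bus address across the descriptor's two 32-bit address words. Trivial, but worth spelling out since it must also behave on 32-bit hosts; a worked example:

#include <stdint.h>
#include <assert.h>

struct rx_bd { uint32_t addr_hi, addr_lo; };

/* Split a 64-bit bus address into the descriptor's hi/lo words,
 * as done at the end of tg3_alloc_rx_skb().
 */
static void bd_set_addr(struct rx_bd *bd, uint64_t mapping)
{
	bd->addr_hi = (uint32_t)(mapping >> 32);
	bd->addr_lo = (uint32_t)(mapping & 0xffffffff);
}

int main(void)
{
	struct rx_bd bd;

	bd_set_addr(&bd, 0x00000001c0001000ull);
	assert(bd.addr_hi == 0x00000001 && bd.addr_lo == 0xc0001000);
	return 0;
}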
4947
4948/* We only need to move over in the address because the other
4949 * members of the RX descriptor are invariant. See notes above
4950 * tg3_alloc_rx_skb for full details.
4951 */
a3896167
MC
4952static void tg3_recycle_rx(struct tg3_napi *tnapi,
4953 struct tg3_rx_prodring_set *dpr,
4954 u32 opaque_key, int src_idx,
4955 u32 dest_idx_unmasked)
1da177e4 4956{
17375d25 4957 struct tg3 *tp = tnapi->tp;
1da177e4
LT
4958 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4959 struct ring_info *src_map, *dest_map;
8fea32b9 4960 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
c6cdf436 4961 int dest_idx;
1da177e4
LT
4962
4963 switch (opaque_key) {
4964 case RXD_OPAQUE_RING_STD:
2c49a44d 4965 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
a3896167
MC
4966 dest_desc = &dpr->rx_std[dest_idx];
4967 dest_map = &dpr->rx_std_buffers[dest_idx];
4968 src_desc = &spr->rx_std[src_idx];
4969 src_map = &spr->rx_std_buffers[src_idx];
1da177e4
LT
4970 break;
4971
4972 case RXD_OPAQUE_RING_JUMBO:
2c49a44d 4973 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
a3896167
MC
4974 dest_desc = &dpr->rx_jmb[dest_idx].std;
4975 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4976 src_desc = &spr->rx_jmb[src_idx].std;
4977 src_map = &spr->rx_jmb_buffers[src_idx];
1da177e4
LT
4978 break;
4979
4980 default:
4981 return;
855e1111 4982 }
1da177e4
LT
4983
4984 dest_map->skb = src_map->skb;
4e5e4f0d
FT
4985 dma_unmap_addr_set(dest_map, mapping,
4986 dma_unmap_addr(src_map, mapping));
1da177e4
LT
4987 dest_desc->addr_hi = src_desc->addr_hi;
4988 dest_desc->addr_lo = src_desc->addr_lo;
e92967bf
MC
4989
4990 /* Ensure that the update to the skb happens after the physical
4991 * addresses have been transferred to the new BD location.
4992 */
4993 smp_wmb();
4994
1da177e4
LT
4995 src_map->skb = NULL;
4996}
4997
1da177e4
LT
4998/* The RX ring scheme is composed of multiple rings which post fresh
4999 * buffers to the chip, and one special ring the chip uses to report
5000 * status back to the host.
5001 *
5002 * The special ring reports the status of received packets to the
5003 * host. The chip does not write into the original descriptor the
5004 * RX buffer was obtained from. The chip simply takes the original
5005 * descriptor as provided by the host, updates the status and length
5006 * field, then writes this into the next status ring entry.
5007 *
5008 * Each ring the host uses to post buffers to the chip is described
 5009 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
5010 * it is first placed into the on-chip ram. When the packet's length
5011 * is known, it walks down the TG3_BDINFO entries to select the ring.
5012 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5013 * which is within the range of the new packet's length is chosen.
5014 *
5015 * The "separate ring for rx status" scheme may sound queer, but it makes
5016 * sense from a cache coherency perspective. If only the host writes
5017 * to the buffer post rings, and only the chip writes to the rx status
5018 * rings, then cache lines never move beyond shared-modified state.
5019 * If both the host and chip were to write into the same ring, cache line
5020 * eviction could occur since both entities want it in an exclusive state.
5021 */
17375d25 5022static int tg3_rx(struct tg3_napi *tnapi, int budget)
1da177e4 5023{
17375d25 5024 struct tg3 *tp = tnapi->tp;
f92905de 5025 u32 work_mask, rx_std_posted = 0;
4361935a 5026 u32 std_prod_idx, jmb_prod_idx;
72334482 5027 u32 sw_idx = tnapi->rx_rcb_ptr;
483ba50b 5028 u16 hw_idx;
1da177e4 5029 int received;
8fea32b9 5030 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
1da177e4 5031
8d9d7cfc 5032 hw_idx = *(tnapi->rx_rcb_prod_idx);
1da177e4
LT
5033 /*
5034 * We need to order the read of hw_idx and the read of
5035 * the opaque cookie.
5036 */
5037 rmb();
1da177e4
LT
5038 work_mask = 0;
5039 received = 0;
4361935a
MC
5040 std_prod_idx = tpr->rx_std_prod_idx;
5041 jmb_prod_idx = tpr->rx_jmb_prod_idx;
1da177e4 5042 while (sw_idx != hw_idx && budget > 0) {
afc081f8 5043 struct ring_info *ri;
72334482 5044 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
1da177e4
LT
5045 unsigned int len;
5046 struct sk_buff *skb;
5047 dma_addr_t dma_addr;
5048 u32 opaque_key, desc_idx, *post_ptr;
5049
5050 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5051 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5052 if (opaque_key == RXD_OPAQUE_RING_STD) {
8fea32b9 5053 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4e5e4f0d 5054 dma_addr = dma_unmap_addr(ri, mapping);
21f581a5 5055 skb = ri->skb;
4361935a 5056 post_ptr = &std_prod_idx;
f92905de 5057 rx_std_posted++;
1da177e4 5058 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
8fea32b9 5059 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4e5e4f0d 5060 dma_addr = dma_unmap_addr(ri, mapping);
21f581a5 5061 skb = ri->skb;
4361935a 5062 post_ptr = &jmb_prod_idx;
21f581a5 5063 } else
1da177e4 5064 goto next_pkt_nopost;
1da177e4
LT
5065
5066 work_mask |= opaque_key;
5067
5068 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5069 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5070 drop_it:
a3896167 5071 tg3_recycle_rx(tnapi, tpr, opaque_key,
1da177e4
LT
5072 desc_idx, *post_ptr);
5073 drop_it_no_recycle:
5074 /* Other statistics kept track of by card. */
b0057c51 5075 tp->rx_dropped++;
1da177e4
LT
5076 goto next_pkt;
5077 }
5078
ad829268
MC
5079 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5080 ETH_FCS_LEN;
1da177e4 5081
d2757fc4 5082 if (len > TG3_RX_COPY_THRESH(tp)) {
1da177e4
LT
5083 int skb_size;
5084
86b21e59 5085 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
afc081f8 5086 *post_ptr);
1da177e4
LT
5087 if (skb_size < 0)
5088 goto drop_it;
5089
287be12e 5090 pci_unmap_single(tp->pdev, dma_addr, skb_size,
1da177e4
LT
5091 PCI_DMA_FROMDEVICE);
5092
61e800cf
MC
5093 /* Ensure that the update to the skb happens
5094 * after the usage of the old DMA mapping.
5095 */
5096 smp_wmb();
5097
5098 ri->skb = NULL;
5099
1da177e4
LT
5100 skb_put(skb, len);
5101 } else {
5102 struct sk_buff *copy_skb;
5103
a3896167 5104 tg3_recycle_rx(tnapi, tpr, opaque_key,
1da177e4
LT
5105 desc_idx, *post_ptr);
5106
bf933c80 5107 copy_skb = netdev_alloc_skb(tp->dev, len +
9dc7a113 5108 TG3_RAW_IP_ALIGN);
1da177e4
LT
5109 if (copy_skb == NULL)
5110 goto drop_it_no_recycle;
5111
bf933c80 5112 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
1da177e4
LT
5113 skb_put(copy_skb, len);
5114 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
d626f62b 5115 skb_copy_from_linear_data(skb, copy_skb->data, len);
1da177e4
LT
5116 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5117
5118 /* We'll reuse the original ring buffer. */
5119 skb = copy_skb;
5120 }
5121
dc668910 5122 if ((tp->dev->features & NETIF_F_RXCSUM) &&
1da177e4
LT
5123 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5124 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5125 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5126 skb->ip_summed = CHECKSUM_UNNECESSARY;
5127 else
bc8acf2c 5128 skb_checksum_none_assert(skb);
1da177e4
LT
5129
5130 skb->protocol = eth_type_trans(skb, tp->dev);
f7b493e0
MC
5131
5132 if (len > (tp->dev->mtu + ETH_HLEN) &&
5133 skb->protocol != htons(ETH_P_8021Q)) {
5134 dev_kfree_skb(skb);
b0057c51 5135 goto drop_it_no_recycle;
f7b493e0
MC
5136 }
5137
9dc7a113 5138 if (desc->type_flags & RXD_FLAG_VLAN &&
bf933c80
MC
5139 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5140 __vlan_hwaccel_put_tag(skb,
5141 desc->err_vlan & RXD_VLAN_MASK);
9dc7a113 5142
bf933c80 5143 napi_gro_receive(&tnapi->napi, skb);
1da177e4 5144
1da177e4
LT
5145 received++;
5146 budget--;
5147
5148next_pkt:
5149 (*post_ptr)++;
f92905de
MC
5150
5151 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
2c49a44d
MC
5152 tpr->rx_std_prod_idx = std_prod_idx &
5153 tp->rx_std_ring_mask;
86cfe4ff
MC
5154 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5155 tpr->rx_std_prod_idx);
f92905de
MC
5156 work_mask &= ~RXD_OPAQUE_RING_STD;
5157 rx_std_posted = 0;
5158 }
1da177e4 5159next_pkt_nopost:
483ba50b 5160 sw_idx++;
7cb32cf2 5161 sw_idx &= tp->rx_ret_ring_mask;
52f6d697
MC
5162
5163 /* Refresh hw_idx to see if there is new work */
5164 if (sw_idx == hw_idx) {
8d9d7cfc 5165 hw_idx = *(tnapi->rx_rcb_prod_idx);
52f6d697
MC
5166 rmb();
5167 }
1da177e4
LT
5168 }
5169
5170 /* ACK the status ring. */
72334482
MC
5171 tnapi->rx_rcb_ptr = sw_idx;
5172 tw32_rx_mbox(tnapi->consmbox, sw_idx);
1da177e4
LT
5173
5174 /* Refill RX ring(s). */
63c3a66f 5175 if (!tg3_flag(tp, ENABLE_RSS)) {
b196c7e4 5176 if (work_mask & RXD_OPAQUE_RING_STD) {
2c49a44d
MC
5177 tpr->rx_std_prod_idx = std_prod_idx &
5178 tp->rx_std_ring_mask;
b196c7e4
MC
5179 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5180 tpr->rx_std_prod_idx);
5181 }
5182 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2c49a44d
MC
5183 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5184 tp->rx_jmb_ring_mask;
b196c7e4
MC
5185 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5186 tpr->rx_jmb_prod_idx);
5187 }
5188 mmiowb();
5189 } else if (work_mask) {
5190 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5191 * updated before the producer indices can be updated.
5192 */
5193 smp_wmb();
5194
2c49a44d
MC
5195 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5196 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
b196c7e4 5197
e4af1af9
MC
5198 if (tnapi != &tp->napi[1])
5199 napi_schedule(&tp->napi[1].napi);
1da177e4 5200 }
1da177e4
LT
5201
5202 return received;
5203}
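
tg3_rx() applies a copy-break policy: frames above TG3_RX_COPY_THRESH() keep the DMA buffer (a replacement is allocated and posted), while small frames are copied into a right-sized skb so the original buffer is recycled without an unmap/remap cycle. A sketch of the decision; the threshold constant here is illustrative, since the driver computes it per chip:

#include <stdbool.h>
#include <stddef.h>
#include <assert.h>

#define RX_COPY_THRESH 256 /* illustrative; driver value is per-chip */

/* Small frames: pay a memcpy, keep the large buffer posted.
 * Big frames: take the buffer, pay an allocation + DMA map for
 * its replacement.
 */
static bool should_copy(size_t len)
{
	return len <= RX_COPY_THRESH;
}

int main(void)
{
	assert(should_copy(60));    /* e.g. a TCP ACK: memcpy path */
	assert(!should_copy(1500)); /* full-size frame: take the buffer */
	return 0;
}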
5204
35f2d7d0 5205static void tg3_poll_link(struct tg3 *tp)
1da177e4 5206{
1da177e4 5207 /* handle link change and other phy events */
63c3a66f 5208 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
35f2d7d0
MC
5209 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5210
1da177e4
LT
5211 if (sblk->status & SD_STATUS_LINK_CHG) {
5212 sblk->status = SD_STATUS_UPDATED |
35f2d7d0 5213 (sblk->status & ~SD_STATUS_LINK_CHG);
f47c11ee 5214 spin_lock(&tp->lock);
63c3a66f 5215 if (tg3_flag(tp, USE_PHYLIB)) {
dd477003
MC
5216 tw32_f(MAC_STATUS,
5217 (MAC_STATUS_SYNC_CHANGED |
5218 MAC_STATUS_CFG_CHANGED |
5219 MAC_STATUS_MI_COMPLETION |
5220 MAC_STATUS_LNKSTATE_CHANGED));
5221 udelay(40);
5222 } else
5223 tg3_setup_phy(tp, 0);
f47c11ee 5224 spin_unlock(&tp->lock);
1da177e4
LT
5225 }
5226 }
35f2d7d0
MC
5227}
5228
f89f38b8
MC
5229static int tg3_rx_prodring_xfer(struct tg3 *tp,
5230 struct tg3_rx_prodring_set *dpr,
5231 struct tg3_rx_prodring_set *spr)
b196c7e4
MC
5232{
5233 u32 si, di, cpycnt, src_prod_idx;
f89f38b8 5234 int i, err = 0;
b196c7e4
MC
5235
5236 while (1) {
5237 src_prod_idx = spr->rx_std_prod_idx;
5238
5239 /* Make sure updates to the rx_std_buffers[] entries and the
5240 * standard producer index are seen in the correct order.
5241 */
5242 smp_rmb();
5243
5244 if (spr->rx_std_cons_idx == src_prod_idx)
5245 break;
5246
5247 if (spr->rx_std_cons_idx < src_prod_idx)
5248 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5249 else
2c49a44d
MC
5250 cpycnt = tp->rx_std_ring_mask + 1 -
5251 spr->rx_std_cons_idx;
b196c7e4 5252
2c49a44d
MC
5253 cpycnt = min(cpycnt,
5254 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
b196c7e4
MC
5255
5256 si = spr->rx_std_cons_idx;
5257 di = dpr->rx_std_prod_idx;
5258
e92967bf
MC
5259 for (i = di; i < di + cpycnt; i++) {
5260 if (dpr->rx_std_buffers[i].skb) {
5261 cpycnt = i - di;
f89f38b8 5262 err = -ENOSPC;
e92967bf
MC
5263 break;
5264 }
5265 }
5266
5267 if (!cpycnt)
5268 break;
5269
5270 /* Ensure that updates to the rx_std_buffers ring and the
5271 * shadowed hardware producer ring from tg3_recycle_skb() are
5272 * ordered correctly WRT the skb check above.
5273 */
5274 smp_rmb();
5275
b196c7e4
MC
5276 memcpy(&dpr->rx_std_buffers[di],
5277 &spr->rx_std_buffers[si],
5278 cpycnt * sizeof(struct ring_info));
5279
5280 for (i = 0; i < cpycnt; i++, di++, si++) {
5281 struct tg3_rx_buffer_desc *sbd, *dbd;
5282 sbd = &spr->rx_std[si];
5283 dbd = &dpr->rx_std[di];
5284 dbd->addr_hi = sbd->addr_hi;
5285 dbd->addr_lo = sbd->addr_lo;
5286 }
5287
2c49a44d
MC
5288 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5289 tp->rx_std_ring_mask;
5290 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5291 tp->rx_std_ring_mask;
b196c7e4
MC
5292 }
5293
5294 while (1) {
5295 src_prod_idx = spr->rx_jmb_prod_idx;
5296
5297 /* Make sure updates to the rx_jmb_buffers[] entries and
5298 * the jumbo producer index are seen in the correct order.
5299 */
5300 smp_rmb();
5301
5302 if (spr->rx_jmb_cons_idx == src_prod_idx)
5303 break;
5304
5305 if (spr->rx_jmb_cons_idx < src_prod_idx)
5306 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5307 else
2c49a44d
MC
5308 cpycnt = tp->rx_jmb_ring_mask + 1 -
5309 spr->rx_jmb_cons_idx;
b196c7e4
MC
5310
5311 cpycnt = min(cpycnt,
2c49a44d 5312 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
b196c7e4
MC
5313
5314 si = spr->rx_jmb_cons_idx;
5315 di = dpr->rx_jmb_prod_idx;
5316
e92967bf
MC
5317 for (i = di; i < di + cpycnt; i++) {
5318 if (dpr->rx_jmb_buffers[i].skb) {
5319 cpycnt = i - di;
f89f38b8 5320 err = -ENOSPC;
e92967bf
MC
5321 break;
5322 }
5323 }
5324
5325 if (!cpycnt)
5326 break;
5327
5328 /* Ensure that updates to the rx_jmb_buffers ring and the
5329 * shadowed hardware producer ring from tg3_recycle_skb() are
5330 * ordered correctly WRT the skb check above.
5331 */
5332 smp_rmb();
5333
5334 memcpy(&dpr->rx_jmb_buffers[di],
5335 &spr->rx_jmb_buffers[si],
5336 cpycnt * sizeof(struct ring_info));
5337
5338 for (i = 0; i < cpycnt; i++, di++, si++) {
5339 struct tg3_rx_buffer_desc *sbd, *dbd;
5340 sbd = &spr->rx_jmb[si].std;
5341 dbd = &dpr->rx_jmb[di].std;
5342 dbd->addr_hi = sbd->addr_hi;
5343 dbd->addr_lo = sbd->addr_lo;
5344 }
5345
5346 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5347 tp->rx_jmb_ring_mask;
5348 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5349 tp->rx_jmb_ring_mask;
b196c7e4 5350 }
5351
5352 return err;
5353}
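
/*
 * Illustrative sketch, not driver code: the transfer loop above is a
 * single-producer/single-consumer hand-off.  Read the peer's producer
 * index, smp_rmb() so the index read is ordered before the entry
 * reads, copy, then advance both indexes modulo the power-of-two ring
 * size.  The copy-count arithmetic in isolation (the caller breaks out
 * first when src_cons == src_prod, i.e. the ring is empty):
 */
static u32 example_ring_copy_count(u32 src_cons, u32 src_prod,
				   u32 dst_prod, u32 mask)
{
	u32 cpycnt;

	if (src_cons < src_prod)
		cpycnt = src_prod - src_cons;	/* contiguous run */
	else
		cpycnt = mask + 1 - src_cons;	/* run up to the wrap point */

	/* Never run past the end of the destination ring either. */
	if (cpycnt > mask + 1 - dst_prod)
		cpycnt = mask + 1 - dst_prod;

	return cpycnt;
}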
5354
5355static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5356{
5357 struct tg3 *tp = tnapi->tp;
5358
5359 /* run TX completion thread */
f3f3f27e 5360 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
17375d25 5361 tg3_tx(tnapi);
63c3a66f 5362 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
4fd7ab59 5363 return work_done;
5364 }
5365
5366 /* run RX thread, within the bounds set by NAPI.
5367 * All RX "locking" is done by ensuring outside
bea3348e 5368 * code synchronizes with tg3->napi.poll()
1da177e4 5369 */
8d9d7cfc 5370 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
17375d25 5371 work_done += tg3_rx(tnapi, budget - work_done);
1da177e4 5372
63c3a66f 5373 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
8fea32b9 5374 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
f89f38b8 5375 int i, err = 0;
5376 u32 std_prod_idx = dpr->rx_std_prod_idx;
5377 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
b196c7e4 5378
e4af1af9 5379 for (i = 1; i < tp->irq_cnt; i++)
f89f38b8 5380 err |= tg3_rx_prodring_xfer(tp, dpr,
8fea32b9 5381 &tp->napi[i].prodring);
5382
5383 wmb();
5384
5385 if (std_prod_idx != dpr->rx_std_prod_idx)
5386 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5387 dpr->rx_std_prod_idx);
b196c7e4 5388
5389 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5390 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5391 dpr->rx_jmb_prod_idx);
5392
5393 mmiowb();
5394
5395 if (err)
5396 tw32_f(HOSTCC_MODE, tp->coal_now);
5397 }
5398
5399 return work_done;
5400}
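
/*
 * Illustrative sketch, not driver code: the RSS refill above follows
 * the standard doorbell ordering rule.  Ring memory must be globally
 * visible (wmb()) before the device is told about the new producer
 * index; the mailbox write is the "doorbell".  Reduced to its shape
 * (the doorbell address is hypothetical):
 */
static void example_ring_kick(u32 prod_idx, void __iomem *doorbell)
{
	/* ... descriptor/ring writes happen before this point ... */
	wmb();			/* publish them before ringing the bell */
	writel(prod_idx, doorbell);
}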
5401
5402static int tg3_poll_msix(struct napi_struct *napi, int budget)
5403{
5404 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5405 struct tg3 *tp = tnapi->tp;
5406 int work_done = 0;
5407 struct tg3_hw_status *sblk = tnapi->hw_status;
5408
5409 while (1) {
5410 work_done = tg3_poll_work(tnapi, work_done, budget);
5411
63c3a66f 5412 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5413 goto tx_recovery;
5414
5415 if (unlikely(work_done >= budget))
5416 break;
5417
c6cdf436 5418 /* tnapi->last_tag is written back to the interrupt
5419 * mailbox below to tell the hw how much work has been
5420 * processed, so we must read it before checking for more work.
5421 */
5422 tnapi->last_tag = sblk->status_tag;
5423 tnapi->last_irq_tag = tnapi->last_tag;
5424 rmb();
5425
5426 /* check for RX/TX work to do */
5427 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5428 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5429 napi_complete(napi);
5430 /* Reenable interrupts. */
5431 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5432 mmiowb();
5433 break;
5434 }
5435 }
5436
5437 return work_done;
5438
5439tx_recovery:
5440 /* work_done is guaranteed to be less than budget. */
5441 napi_complete(napi);
5442 schedule_work(&tp->reset_task);
5443 return work_done;
5444}
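
/*
 * Illustrative sketch, not driver code: tg3_poll_msix() implements the
 * canonical NAPI contract - consume at most "budget" units of work and
 * only napi_complete() plus IRQ unmask once there is provably no work
 * left.  Skeleton of that contract (the two example_*() helpers are
 * hypothetical):
 */
static int example_process_rings(struct napi_struct *napi, int budget);
static void example_unmask_irq(struct napi_struct *napi);

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_process_rings(napi, budget);

	if (work_done < budget) {
		napi_complete(napi);	  /* leave polled mode ... */
		example_unmask_irq(napi); /* ... let the device fire again */
	}
	return work_done;		  /* == budget keeps NAPI polling */
}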
5445
5446static void tg3_process_error(struct tg3 *tp)
5447{
5448 u32 val;
5449 bool real_error = false;
5450
63c3a66f 5451 if (tg3_flag(tp, ERROR_PROCESSED))
5452 return;
5453
5454 /* Check Flow Attention register */
5455 val = tr32(HOSTCC_FLOW_ATTN);
5456 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5457 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5458 real_error = true;
5459 }
5460
5461 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5462 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5463 real_error = true;
5464 }
5465
5466 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5467 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5468 real_error = true;
5469 }
5470
5471 if (!real_error)
5472 return;
5473
5474 tg3_dump_state(tp);
5475
63c3a66f 5476 tg3_flag_set(tp, ERROR_PROCESSED);
5477 schedule_work(&tp->reset_task);
5478}
5479
5480static int tg3_poll(struct napi_struct *napi, int budget)
5481{
5482 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5483 struct tg3 *tp = tnapi->tp;
6f535763 5484 int work_done = 0;
898a56f8 5485 struct tg3_hw_status *sblk = tnapi->hw_status;
5486
5487 while (1) {
5488 if (sblk->status & SD_STATUS_ERROR)
5489 tg3_process_error(tp);
5490
5491 tg3_poll_link(tp);
5492
17375d25 5493 work_done = tg3_poll_work(tnapi, work_done, budget);
6f535763 5494
63c3a66f 5495 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5496 goto tx_recovery;
5497
5498 if (unlikely(work_done >= budget))
5499 break;
5500
63c3a66f 5501 if (tg3_flag(tp, TAGGED_STATUS)) {
17375d25 5502 /* tnapi->last_tag is used in tg3_int_reenable() below
5503 * to tell the hw how much work has been processed,
5504 * so we must read it before checking for more work.
5505 */
5506 tnapi->last_tag = sblk->status_tag;
5507 tnapi->last_irq_tag = tnapi->last_tag;
5508 rmb();
5509 } else
5510 sblk->status &= ~SD_STATUS_UPDATED;
6f535763 5511
17375d25 5512 if (likely(!tg3_has_work(tnapi))) {
288379f0 5513 napi_complete(napi);
17375d25 5514 tg3_int_reenable(tnapi);
5515 break;
5516 }
5517 }
5518
bea3348e 5519 return work_done;
5520
5521tx_recovery:
4fd7ab59 5522 /* work_done is guaranteed to be less than budget. */
288379f0 5523 napi_complete(napi);
6f535763 5524 schedule_work(&tp->reset_task);
4fd7ab59 5525 return work_done;
5526}
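
/*
 * Illustrative sketch, not driver code: the two status block ack
 * styles used by tg3_poll().  With TAGGED_STATUS the chip stamps every
 * status block update with a tag, and echoing the last seen tag back
 * through the interrupt mailbox both re-enables the IRQ and reports
 * progress; without it the driver simply clears SD_STATUS_UPDATED in
 * host memory (assumes a tg3_napi as above):
 */
static void example_ack_status(struct tg3_napi *tnapi, bool tagged)
{
	struct tg3 *tp = tnapi->tp;

	if (tagged)
		tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	else
		tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
}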
5527
5528static void tg3_napi_disable(struct tg3 *tp)
5529{
5530 int i;
5531
5532 for (i = tp->irq_cnt - 1; i >= 0; i--)
5533 napi_disable(&tp->napi[i].napi);
5534}
5535
5536static void tg3_napi_enable(struct tg3 *tp)
5537{
5538 int i;
5539
5540 for (i = 0; i < tp->irq_cnt; i++)
5541 napi_enable(&tp->napi[i].napi);
5542}
5543
5544static void tg3_napi_init(struct tg3 *tp)
5545{
5546 int i;
5547
5548 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5549 for (i = 1; i < tp->irq_cnt; i++)
5550 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5551}
5552
5553static void tg3_napi_fini(struct tg3 *tp)
5554{
5555 int i;
5556
5557 for (i = 0; i < tp->irq_cnt; i++)
5558 netif_napi_del(&tp->napi[i].napi);
5559}
5560
5561static inline void tg3_netif_stop(struct tg3 *tp)
5562{
5563 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5564 tg3_napi_disable(tp);
5565 netif_tx_disable(tp->dev);
5566}
5567
5568static inline void tg3_netif_start(struct tg3 *tp)
5569{
5570 /* NOTE: unconditional netif_tx_wake_all_queues is only
5571 * appropriate so long as all callers are assured to
5572 * have free tx slots (such as after tg3_init_hw)
5573 */
5574 netif_tx_wake_all_queues(tp->dev);
5575
5576 tg3_napi_enable(tp);
5577 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5578 tg3_enable_ints(tp);
5579}
5580
5581static void tg3_irq_quiesce(struct tg3 *tp)
5582{
5583 int i;
5584
5585 BUG_ON(tp->irq_sync);
5586
5587 tp->irq_sync = 1;
5588 smp_mb();
5589
5590 for (i = 0; i < tp->irq_cnt; i++)
5591 synchronize_irq(tp->napi[i].irq_vec);
5592}
5593
5594/* Fully shut down all tg3 driver activity elsewhere in the system.
5595 * If irq_sync is non-zero, then the IRQ handlers must be synchronized
5596 * as well. Most of the time, this is not necessary except when
5597 * shutting down the device.
5598 */
5599static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5600{
46966545 5601 spin_lock_bh(&tp->lock);
5602 if (irq_sync)
5603 tg3_irq_quiesce(tp);
5604}
5605
5606static inline void tg3_full_unlock(struct tg3 *tp)
5607{
5608 spin_unlock_bh(&tp->lock);
5609}
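
/*
 * Illustrative sketch, not driver code: the typical caller pattern for
 * the helpers above (compare tg3_change_mtu() later in this file).
 * irq_sync=1 is passed only when the hardware is about to be torn down
 * or reprogrammed, so in-flight interrupt handlers drain first:
 */
static void example_reconfigure(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* irq_sync=1: also tg3_irq_quiesce() */
	/* ... halt and re-initialize the chip here ... */
	tg3_full_unlock(tp);
}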
5610
5611/* One-shot MSI handler - Chip automatically disables interrupt
5612 * after sending MSI so driver doesn't have to do it.
5613 */
7d12e780 5614static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
fcfa0a32 5615{
5616 struct tg3_napi *tnapi = dev_id;
5617 struct tg3 *tp = tnapi->tp;
fcfa0a32 5618
898a56f8 5619 prefetch(tnapi->hw_status);
5620 if (tnapi->rx_rcb)
5621 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5622
5623 if (likely(!tg3_irq_sync(tp)))
09943a18 5624 napi_schedule(&tnapi->napi);
5625
5626 return IRQ_HANDLED;
5627}
5628
5629/* MSI ISR - No need to check for interrupt sharing and no need to
5630 * flush status block and interrupt mailbox. PCI ordering rules
5631 * guarantee that MSI will arrive after the status block.
5632 */
7d12e780 5633static irqreturn_t tg3_msi(int irq, void *dev_id)
88b06bc2 5634{
5635 struct tg3_napi *tnapi = dev_id;
5636 struct tg3 *tp = tnapi->tp;
88b06bc2 5637
898a56f8 5638 prefetch(tnapi->hw_status);
5639 if (tnapi->rx_rcb)
5640 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
88b06bc2 5641 /*
fac9b83e 5642 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 5643 * chip-internal interrupt pending events.
fac9b83e 5644 * Writing non-zero to intr-mbox-0 additionally tells the
5645 * NIC to stop sending us irqs, engaging "in-intr-handler"
5646 * event coalescing.
5647 */
5648 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
61487480 5649 if (likely(!tg3_irq_sync(tp)))
09943a18 5650 napi_schedule(&tnapi->napi);
61487480 5651
5652 return IRQ_RETVAL(1);
5653}
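
/*
 * Illustrative sketch, not necessarily the exact registration done
 * elsewhere in this file: both MSI handlers take a struct tg3_napi *
 * as their dev_id cookie, so each vector lands directly on one NAPI
 * context.  MSI vectors are never shared, hence no IRQF_SHARED:
 */
static int example_request_msi(struct tg3 *tp, int i)
{
	struct tg3_napi *tnapi = &tp->napi[i];
	irq_handler_t fn = tg3_flag(tp, 1SHOT_MSI) ?
			   tg3_msi_1shot : tg3_msi;

	return request_irq(tnapi->irq_vec, fn, 0, tp->dev->name, tnapi);
}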
5654
7d12e780 5655static irqreturn_t tg3_interrupt(int irq, void *dev_id)
1da177e4 5656{
5657 struct tg3_napi *tnapi = dev_id;
5658 struct tg3 *tp = tnapi->tp;
898a56f8 5659 struct tg3_hw_status *sblk = tnapi->hw_status;
5660 unsigned int handled = 1;
5661
5662 /* In INTx mode, it is possible for the interrupt to arrive at
5663 * the CPU before the status block that was posted prior to the interrupt.
5664 * Reading the PCI State register will confirm whether the
5665 * interrupt is ours and will flush the status block.
5666 */
d18edcb2 5667 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
63c3a66f 5668 if (tg3_flag(tp, CHIP_RESETTING) ||
5669 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5670 handled = 0;
f47c11ee 5671 goto out;
fac9b83e 5672 }
5673 }
5674
5675 /*
5676 * Writing any value to intr-mbox-0 clears PCI INTA# and
5677 * chip-internal interrupt pending events.
5678 * Writing non-zero to intr-mbox-0 additionally tells the
5679 * NIC to stop sending us irqs, engaging "in-intr-handler"
5680 * event coalescing.
5681 *
5682 * Flush the mailbox to de-assert the IRQ immediately to prevent
5683 * spurious interrupts. The flush impacts performance but
5684 * excessive spurious interrupts can be worse in some cases.
d18edcb2 5685 */
c04cb347 5686 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5687 if (tg3_irq_sync(tp))
5688 goto out;
5689 sblk->status &= ~SD_STATUS_UPDATED;
17375d25 5690 if (likely(tg3_has_work(tnapi))) {
72334482 5691 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
09943a18 5692 napi_schedule(&tnapi->napi);
5693 } else {
5694 /* No work, shared interrupt perhaps? re-enable
5695 * interrupts, and flush that PCI write
5696 */
5697 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5698 0x00000000);
fac9b83e 5699 }
f47c11ee 5700out:
5701 return IRQ_RETVAL(handled);
5702}
5703
7d12e780 5704static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
fac9b83e 5705{
5706 struct tg3_napi *tnapi = dev_id;
5707 struct tg3 *tp = tnapi->tp;
898a56f8 5708 struct tg3_hw_status *sblk = tnapi->hw_status;
5709 unsigned int handled = 1;
5710
5711 /* In INTx mode, it is possible for the interrupt to arrive at
5712 * the CPU before the status block that was posted prior to the interrupt.
5713 * Reading the PCI State register will confirm whether the
5714 * interrupt is ours and will flush the status block.
5715 */
898a56f8 5716 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
63c3a66f 5717 if (tg3_flag(tp, CHIP_RESETTING) ||
5718 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5719 handled = 0;
f47c11ee 5720 goto out;
1da177e4 5721 }
5722 }
5723
5724 /*
5725 * writing any value to intr-mbox-0 clears PCI INTA# and
5726 * chip-internal interrupt pending events.
5727 * writing non-zero to intr-mbox-0 additionally tells the
5728 * NIC to stop sending us irqs, engaging "in-intr-handler"
5729 * event coalescing.
5730 *
5731 * Flush the mailbox to de-assert the IRQ immediately to prevent
5732 * spurious interrupts. The flush impacts performance but
5733 * excessive spurious interrupts can be worse in some cases.
d18edcb2 5734 */
c04cb347 5735 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5736
5737 /*
5738 * In a shared interrupt configuration, sometimes other devices'
5739 * interrupts will scream. We record the current status tag here
5740 * so that the above check can report that the screaming interrupts
5741 * are unhandled. Eventually they will be silenced.
5742 */
898a56f8 5743 tnapi->last_irq_tag = sblk->status_tag;
624f8e50 5744
5745 if (tg3_irq_sync(tp))
5746 goto out;
624f8e50 5747
72334482 5748 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
624f8e50 5749
09943a18 5750 napi_schedule(&tnapi->napi);
624f8e50 5751
f47c11ee 5752out:
5753 return IRQ_RETVAL(handled);
5754}
5755
7938109f 5756/* ISR for interrupt test */
7d12e780 5757static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7938109f 5758{
5759 struct tg3_napi *tnapi = dev_id;
5760 struct tg3 *tp = tnapi->tp;
898a56f8 5761 struct tg3_hw_status *sblk = tnapi->hw_status;
7938109f 5762
5763 if ((sblk->status & SD_STATUS_UPDATED) ||
5764 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
b16250e3 5765 tg3_disable_ints(tp);
5766 return IRQ_RETVAL(1);
5767 }
5768 return IRQ_RETVAL(0);
5769}
5770
8e7a22e3 5771static int tg3_init_hw(struct tg3 *, int);
944d980e 5772static int tg3_halt(struct tg3 *, int, int);
1da177e4 5773
5774/* Restart hardware after configuration changes, self-test, etc.
5775 * Invoked with tp->lock held.
5776 */
5777static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5778 __releases(tp->lock)
5779 __acquires(tp->lock)
5780{
5781 int err;
5782
5783 err = tg3_init_hw(tp, reset_phy);
5784 if (err) {
5785 netdev_err(tp->dev,
5786 "Failed to re-initialize device, aborting\n");
5787 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5788 tg3_full_unlock(tp);
5789 del_timer_sync(&tp->timer);
5790 tp->irq_sync = 0;
fed97810 5791 tg3_napi_enable(tp);
5792 dev_close(tp->dev);
5793 tg3_full_lock(tp, 0);
5794 }
5795 return err;
5796}
5797
5798#ifdef CONFIG_NET_POLL_CONTROLLER
5799static void tg3_poll_controller(struct net_device *dev)
5800{
4f125f42 5801 int i;
5802 struct tg3 *tp = netdev_priv(dev);
5803
4f125f42 5804 for (i = 0; i < tp->irq_cnt; i++)
fe234f0e 5805 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5806}
5807#endif
5808
c4028958 5809static void tg3_reset_task(struct work_struct *work)
1da177e4 5810{
c4028958 5811 struct tg3 *tp = container_of(work, struct tg3, reset_task);
b02fd9e3 5812 int err;
5813 unsigned int restart_timer;
5814
7faa006f 5815 tg3_full_lock(tp, 0);
5816
5817 if (!netif_running(tp->dev)) {
5818 tg3_full_unlock(tp);
5819 return;
5820 }
5821
5822 tg3_full_unlock(tp);
5823
5824 tg3_phy_stop(tp);
5825
5826 tg3_netif_stop(tp);
5827
f47c11ee 5828 tg3_full_lock(tp, 1);
1da177e4 5829
5830 restart_timer = tg3_flag(tp, RESTART_TIMER);
5831 tg3_flag_clear(tp, RESTART_TIMER);
1da177e4 5832
63c3a66f 5833 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5834 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5835 tp->write32_rx_mbox = tg3_write_flush_reg32;
5836 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5837 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5838 }
5839
944d980e 5840 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5841 err = tg3_init_hw(tp, 1);
5842 if (err)
b9ec6c1b 5843 goto out;
5844
5845 tg3_netif_start(tp);
5846
5847 if (restart_timer)
5848 mod_timer(&tp->timer, jiffies + 1);
7faa006f 5849
b9ec6c1b 5850out:
7faa006f 5851 tg3_full_unlock(tp);
5852
5853 if (!err)
5854 tg3_phy_start(tp);
5855}
5856
5857static void tg3_tx_timeout(struct net_device *dev)
5858{
5859 struct tg3 *tp = netdev_priv(dev);
5860
b0408751 5861 if (netif_msg_tx_err(tp)) {
05dbe005 5862 netdev_err(dev, "transmit timed out, resetting\n");
97bd8e49 5863 tg3_dump_state(tp);
b0408751 5864 }
5865
5866 schedule_work(&tp->reset_task);
5867}
5868
5869/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5870static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5871{
5872 u32 base = (u32) mapping & 0xffffffff;
5873
807540ba 5874 return (base > 0xffffdcc0) && (base + len + 8 < base);
5875}
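
/*
 * Worked example, illustrative only: the test above flags a DMA span
 * that would cross a 4 GiB boundary.  In 32-bit arithmetic,
 * "base + len + 8" wrapping below "base" is exactly a carry out of
 * bit 31, i.e. the first and last bytes of the buffer (plus 8 bytes of
 * slack) end up in different 4 GiB regions.  The "base > 0xffffdcc0"
 * clause is a cheap pre-filter: only a base in the last ~9 KiB of a
 * region can wrap for the frame sizes this hardware handles.
 */
static int example_crosses_4g(u64 mapping, int len)
{
	u32 base = (u32) mapping;

	/* 0xfffff000 + 0x2000 + 8 wraps to 0x00001008 -> crosses;
	 * 0x00001000 + 0x2000 + 8 = 0x00003008      -> safe.
	 */
	return base + len + 8 < base;
}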
5876
5877/* Test for DMA addresses > 40-bit */
5878static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5879 int len)
5880{
5881#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
63c3a66f 5882 if (tg3_flag(tp, 40BIT_DMA_BUG))
807540ba 5883 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5884 return 0;
5885#else
5886 return 0;
5887#endif
5888}
5889
5890static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5891 dma_addr_t mapping, int len, u32 flags,
5892 u32 mss_and_is_end)
5893{
5894 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5895 int is_end = (mss_and_is_end & 0x1);
5896 u32 mss = (mss_and_is_end >> 1);
5897 u32 vlan_tag = 0;
5898
5899 if (is_end)
5900 flags |= TXD_FLAG_END;
5901 if (flags & TXD_FLAG_VLAN) {
5902 vlan_tag = flags >> 16;
5903 flags &= 0xffff;
5904 }
5905 vlan_tag |= (mss << TXD_MSS_SHIFT);
5906
5907 txd->addr_hi = ((u64) mapping >> 32);
5908 txd->addr_lo = ((u64) mapping & 0xffffffff);
5909 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5910 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5911}
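
/*
 * Illustrative round-trip of the packing used by tg3_set_txd()
 * callers: bit 0 of mss_and_is_end flags the final fragment and the
 * remaining bits carry the MSS, i.e. "(i == last) | (mss << 1)" as in
 * tg3_start_xmit() below.  For example, MSS 1460 on a final fragment
 * packs to (1460 << 1) | 1 == 0xb69; tg3_set_txd() then recovers
 * is_end as (v & 0x1) and the MSS as (v >> 1).
 */
static u32 example_pack_mss_end(u32 mss, int is_end)
{
	return (u32)(is_end & 1) | (mss << 1);
}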
1da177e4 5912
5913static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5914 struct sk_buff *skb, int last)
5915{
5916 int i;
5917 u32 entry = tnapi->tx_prod;
5918 struct ring_info *txb = &tnapi->tx_buffers[entry];
5919
5920 pci_unmap_single(tnapi->tp->pdev,
5921 dma_unmap_addr(txb, mapping),
5922 skb_headlen(skb),
5923 PCI_DMA_TODEVICE);
9a2e0fb0 5924 for (i = 0; i < last; i++) {
5925 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5926
5927 entry = NEXT_TX(entry);
5928 txb = &tnapi->tx_buffers[entry];
5929
5930 pci_unmap_page(tnapi->tp->pdev,
5931 dma_unmap_addr(txb, mapping),
5932 frag->size, PCI_DMA_TODEVICE);
5933 }
5934}
5935
72f2afb8 5936/* Work around 4GB and 40-bit hardware DMA bugs. */
24f4efd4 5937static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5938 struct sk_buff *skb,
5939 u32 base_flags, u32 mss)
1da177e4 5940{
24f4efd4 5941 struct tg3 *tp = tnapi->tp;
41588ba1 5942 struct sk_buff *new_skb;
c58ec932 5943 dma_addr_t new_addr = 0;
5944 u32 entry = tnapi->tx_prod;
5945 int ret = 0;
1da177e4 5946
5947 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5948 new_skb = skb_copy(skb, GFP_ATOMIC);
5949 else {
5950 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5951
5952 new_skb = skb_copy_expand(skb,
5953 skb_headroom(skb) + more_headroom,
5954 skb_tailroom(skb), GFP_ATOMIC);
5955 }
5956
1da177e4 5957 if (!new_skb) {
5958 ret = -1;
5959 } else {
5960 /* New SKB is guaranteed to be linear. */
5961 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5962 PCI_DMA_TODEVICE);
5963 /* Make sure the mapping succeeded */
5964 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5965 ret = -1;
5966 dev_kfree_skb(new_skb);
90079ce8 5967
5968 /* Make sure new skb does not cross any 4G boundaries.
5969 * Drop the packet if it does.
5970 */
eb69d564 5971 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5972 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5973 PCI_DMA_TODEVICE);
5974 ret = -1;
5975 dev_kfree_skb(new_skb);
c58ec932 5976 } else {
5977 tnapi->tx_buffers[entry].skb = new_skb;
5978 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5979 mapping, new_addr);
5980
f3f3f27e 5981 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
c58ec932 5982 base_flags, 1 | (mss << 1));
f4188d8a 5983 }
5984 }
5985
5986 dev_kfree_skb(skb);
5987
c58ec932 5988 return ret;
5989}
5990
2ffcc981 5991static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5992
5993 /* Use GSO to work around a rare TSO bug that may be triggered when the
5994 * TSO header is greater than 80 bytes.
5995 */
5996static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5997{
5998 struct sk_buff *segs, *nskb;
f3f3f27e 5999 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6000
6001 /* Estimate the number of fragments in the worst case */
f3f3f27e 6002 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
52c0fd83 6003 netif_stop_queue(tp->dev);
6004
6005 /* netif_tx_stop_queue() must be done before checking
6006 * tx index in tg3_tx_avail() below, because in
6007 * tg3_tx(), we update tx index before checking for
6008 * netif_tx_queue_stopped().
6009 */
6010 smp_mb();
f3f3f27e 6011 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6012 return NETDEV_TX_BUSY;
6013
6014 netif_wake_queue(tp->dev);
6015 }
6016
6017 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
801678c5 6018 if (IS_ERR(segs))
6019 goto tg3_tso_bug_end;
6020
6021 do {
6022 nskb = segs;
6023 segs = segs->next;
6024 nskb->next = NULL;
2ffcc981 6025 tg3_start_xmit(nskb, tp->dev);
6026 } while (segs);
6027
6028tg3_tso_bug_end:
6029 dev_kfree_skb(skb);
6030
6031 return NETDEV_TX_OK;
6032}
52c0fd83 6033
5a6f3074 6034/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
63c3a66f 6035 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5a6f3074 6036 */
2ffcc981 6037static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6038{
6039 struct tg3 *tp = netdev_priv(dev);
1da177e4 6040 u32 len, entry, base_flags, mss;
432aa7ed 6041 int i = -1, would_hit_hwbug;
90079ce8 6042 dma_addr_t mapping;
6043 struct tg3_napi *tnapi;
6044 struct netdev_queue *txq;
432aa7ed 6045 unsigned int last;
f4188d8a 6046
6047 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6048 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
63c3a66f 6049 if (tg3_flag(tp, ENABLE_TSS))
24f4efd4 6050 tnapi++;
1da177e4 6051
00b70504 6052 /* We are running in BH disabled context with netif_tx_lock
bea3348e 6053 * and TX reclaim runs via tp->napi.poll inside of a software
6054 * interrupt. Furthermore, IRQ processing runs lockless so we have
6055 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 6056 */
f3f3f27e 6057 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6058 if (!netif_tx_queue_stopped(txq)) {
6059 netif_tx_stop_queue(txq);
6060
6061 /* This is a hard error, log it. */
6062 netdev_err(dev,
6063 "BUG! Tx Ring full when queue awake!\n");
1f064a87 6064 }
6065 return NETDEV_TX_BUSY;
6066 }
6067
f3f3f27e 6068 entry = tnapi->tx_prod;
1da177e4 6069 base_flags = 0;
84fa7933 6070 if (skb->ip_summed == CHECKSUM_PARTIAL)
1da177e4 6071 base_flags |= TXD_FLAG_TCPUDP_CSUM;
24f4efd4 6072
6073 mss = skb_shinfo(skb)->gso_size;
6074 if (mss) {
eddc9ec5 6075 struct iphdr *iph;
34195c3d 6076 u32 tcp_opt_len, hdr_len;
6077
6078 if (skb_header_cloned(skb) &&
6079 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6080 dev_kfree_skb(skb);
6081 goto out_unlock;
6082 }
6083
34195c3d 6084 iph = ip_hdr(skb);
ab6a5bb6 6085 tcp_opt_len = tcp_optlen(skb);
1da177e4 6086
02e96080 6087 if (skb_is_gso_v6(skb)) {
6088 hdr_len = skb_headlen(skb) - ETH_HLEN;
6089 } else {
6090 u32 ip_tcp_len;
6091
6092 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6093 hdr_len = ip_tcp_len + tcp_opt_len;
6094
6095 iph->check = 0;
6096 iph->tot_len = htons(mss + hdr_len);
6097 }
6098
52c0fd83 6099 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
63c3a66f 6100 tg3_flag(tp, TSO_BUG))
de6f31eb 6101 return tg3_tso_bug(tp, skb);
52c0fd83 6102
6103 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6104 TXD_FLAG_CPU_POST_DMA);
6105
6106 if (tg3_flag(tp, HW_TSO_1) ||
6107 tg3_flag(tp, HW_TSO_2) ||
6108 tg3_flag(tp, HW_TSO_3)) {
aa8223c7 6109 tcp_hdr(skb)->check = 0;
1da177e4 6110 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6111 } else
6112 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6113 iph->daddr, 0,
6114 IPPROTO_TCP,
6115 0);
1da177e4 6116
63c3a66f 6117 if (tg3_flag(tp, HW_TSO_3)) {
6118 mss |= (hdr_len & 0xc) << 12;
6119 if (hdr_len & 0x10)
6120 base_flags |= 0x00000010;
6121 base_flags |= (hdr_len & 0x3e0) << 5;
63c3a66f 6122 } else if (tg3_flag(tp, HW_TSO_2))
92c6b8d1 6123 mss |= hdr_len << 9;
63c3a66f 6124 else if (tg3_flag(tp, HW_TSO_1) ||
92c6b8d1 6125 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
eddc9ec5 6126 if (tcp_opt_len || iph->ihl > 5) {
6127 int tsflags;
6128
eddc9ec5 6129 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6130 mss |= (tsflags << 11);
6131 }
6132 } else {
eddc9ec5 6133 if (tcp_opt_len || iph->ihl > 5) {
6134 int tsflags;
6135
eddc9ec5 6136 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6137 base_flags |= tsflags << 12;
6138 }
6139 }
6140 }
bf933c80 6141
eab6d18d 6142 if (vlan_tx_tag_present(skb))
6143 base_flags |= (TXD_FLAG_VLAN |
6144 (vlan_tx_tag_get(skb) << 16));
1da177e4 6145
63c3a66f 6146 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8fc2f995 6147 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6148 base_flags |= TXD_FLAG_JMB_PKT;
6149
6150 len = skb_headlen(skb);
6151
6152 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6153 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6154 dev_kfree_skb(skb);
6155 goto out_unlock;
6156 }
6157
f3f3f27e 6158 tnapi->tx_buffers[entry].skb = skb;
4e5e4f0d 6159 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6160
6161 would_hit_hwbug = 0;
6162
63c3a66f 6163 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6164 would_hit_hwbug = 1;
6165
eb69d564 6166 if (tg3_4g_overflow_test(mapping, len))
6167 would_hit_hwbug = 1;
6168
daf9a553 6169 if (tg3_40bit_overflow_test(tp, mapping, len))
41588ba1 6170 would_hit_hwbug = 1;
0e1406dd 6171
63c3a66f 6172 if (tg3_flag(tp, 5701_DMA_BUG))
c58ec932 6173 would_hit_hwbug = 1;
1da177e4 6174
f3f3f27e 6175 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6176 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6177
6178 entry = NEXT_TX(entry);
6179
6180 /* Now loop through additional data fragments, and queue them. */
6181 if (skb_shinfo(skb)->nr_frags > 0) {
6182 last = skb_shinfo(skb)->nr_frags - 1;
6183 for (i = 0; i <= last; i++) {
6184 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6185
6186 len = frag->size;
6187 mapping = pci_map_page(tp->pdev,
6188 frag->page,
6189 frag->page_offset,
6190 len, PCI_DMA_TODEVICE);
1da177e4 6191
f3f3f27e 6192 tnapi->tx_buffers[entry].skb = NULL;
4e5e4f0d 6193 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6194 mapping);
6195 if (pci_dma_mapping_error(tp->pdev, mapping))
6196 goto dma_error;
1da177e4 6197
63c3a66f 6198 if (tg3_flag(tp, SHORT_DMA_BUG) &&
92c6b8d1
MC
6199 len <= 8)
6200 would_hit_hwbug = 1;
6201
eb69d564 6202 if (tg3_4g_overflow_test(mapping, len))
c58ec932 6203 would_hit_hwbug = 1;
1da177e4 6204
daf9a553 6205 if (tg3_40bit_overflow_test(tp, mapping, len))
6206 would_hit_hwbug = 1;
6207
6208 if (tg3_flag(tp, HW_TSO_1) ||
6209 tg3_flag(tp, HW_TSO_2) ||
6210 tg3_flag(tp, HW_TSO_3))
f3f3f27e 6211 tg3_set_txd(tnapi, entry, mapping, len,
6212 base_flags, (i == last)|(mss << 1));
6213 else
f3f3f27e 6214 tg3_set_txd(tnapi, entry, mapping, len,
6215 base_flags, (i == last));
6216
6217 entry = NEXT_TX(entry);
6218 }
6219 }
6220
6221 if (would_hit_hwbug) {
432aa7ed 6222 tg3_skb_error_unmap(tnapi, skb, i);
6223
6224 /* If the workaround fails due to memory/mapping
6225 * failure, silently drop this packet.
6226 */
432aa7ed 6227 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6228 goto out_unlock;
6229
432aa7ed 6230 entry = NEXT_TX(tnapi->tx_prod);
6231 }
6232
6233 skb_tx_timestamp(skb);
6234
1da177e4 6235 /* Packets are ready, update Tx producer idx local and on card. */
24f4efd4 6236 tw32_tx_mbox(tnapi->prodmbox, entry);
1da177e4 6237
6238 tnapi->tx_prod = entry;
6239 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
24f4efd4 6240 netif_tx_stop_queue(txq);
6241
6242 /* netif_tx_stop_queue() must be done before checking
6243 * checking tx index in tg3_tx_avail() below, because in
6244 * tg3_tx(), we update tx index before checking for
6245 * netif_tx_queue_stopped().
6246 */
6247 smp_mb();
f3f3f27e 6248 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
24f4efd4 6249 netif_tx_wake_queue(txq);
51b91468 6250 }
6251
6252out_unlock:
cdd0db05 6253 mmiowb();
6254
6255 return NETDEV_TX_OK;
6256
6257dma_error:
432aa7ed 6258 tg3_skb_error_unmap(tnapi, skb, i);
f4188d8a 6259 dev_kfree_skb(skb);
432aa7ed 6260 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
f4188d8a 6261 return NETDEV_TX_OK;
6262}
6263
6264static void tg3_set_loopback(struct net_device *dev, u32 features)
6265{
6266 struct tg3 *tp = netdev_priv(dev);
6267
6268 if (features & NETIF_F_LOOPBACK) {
6269 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6270 return;
6271
6272 /*
6273 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6274 * loopback mode if Half-Duplex mode was negotiated earlier.
6275 */
6276 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6277
6278 /* Enable internal MAC loopback mode */
6279 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6280 spin_lock_bh(&tp->lock);
6281 tw32(MAC_MODE, tp->mac_mode);
6282 netif_carrier_on(tp->dev);
6283 spin_unlock_bh(&tp->lock);
6284 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6285 } else {
6286 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6287 return;
6288
6289 /* Disable internal MAC loopback mode */
6290 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6291 spin_lock_bh(&tp->lock);
6292 tw32(MAC_MODE, tp->mac_mode);
6293 /* Force link status check */
6294 tg3_setup_phy(tp, 1);
6295 spin_unlock_bh(&tp->lock);
6296 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6297 }
6298}
6299
6300static u32 tg3_fix_features(struct net_device *dev, u32 features)
6301{
6302 struct tg3 *tp = netdev_priv(dev);
6303
63c3a66f 6304 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6305 features &= ~NETIF_F_ALL_TSO;
6306
6307 return features;
6308}
6309
6310static int tg3_set_features(struct net_device *dev, u32 features)
6311{
6312 u32 changed = dev->features ^ features;
6313
6314 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6315 tg3_set_loopback(dev, features);
6316
6317 return 0;
6318}
6319
6320static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6321 int new_mtu)
6322{
6323 dev->mtu = new_mtu;
6324
ef7f5ec0 6325 if (new_mtu > ETH_DATA_LEN) {
63c3a66f 6326 if (tg3_flag(tp, 5780_CLASS)) {
dc668910 6327 netdev_update_features(dev);
63c3a66f 6328 tg3_flag_clear(tp, TSO_CAPABLE);
859a5887 6329 } else {
63c3a66f 6330 tg3_flag_set(tp, JUMBO_RING_ENABLE);
859a5887 6331 }
ef7f5ec0 6332 } else {
6333 if (tg3_flag(tp, 5780_CLASS)) {
6334 tg3_flag_set(tp, TSO_CAPABLE);
6335 netdev_update_features(dev);
6336 }
63c3a66f 6337 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
ef7f5ec0 6338 }
6339}
6340
6341static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6342{
6343 struct tg3 *tp = netdev_priv(dev);
b9ec6c1b 6344 int err;
6345
6346 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6347 return -EINVAL;
6348
6349 if (!netif_running(dev)) {
6350 /* We'll just catch it later when the
6351 * device is up'd.
6352 */
6353 tg3_set_mtu(dev, tp, new_mtu);
6354 return 0;
6355 }
6356
6357 tg3_phy_stop(tp);
6358
1da177e4 6359 tg3_netif_stop(tp);
6360
6361 tg3_full_lock(tp, 1);
1da177e4 6362
944d980e 6363 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6364
6365 tg3_set_mtu(dev, tp, new_mtu);
6366
b9ec6c1b 6367 err = tg3_restart_hw(tp, 0);
1da177e4 6368
6369 if (!err)
6370 tg3_netif_start(tp);
1da177e4 6371
f47c11ee 6372 tg3_full_unlock(tp);
1da177e4 6373
6374 if (!err)
6375 tg3_phy_start(tp);
6376
b9ec6c1b 6377 return err;
6378}
6379
6380static void tg3_rx_prodring_free(struct tg3 *tp,
6381 struct tg3_rx_prodring_set *tpr)
1da177e4 6382{
6383 int i;
6384
8fea32b9 6385 if (tpr != &tp->napi[0].prodring) {
b196c7e4 6386 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
2c49a44d 6387 i = (i + 1) & tp->rx_std_ring_mask)
6388 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6389 tp->rx_pkt_map_sz);
6390
63c3a66f 6391 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6392 for (i = tpr->rx_jmb_cons_idx;
6393 i != tpr->rx_jmb_prod_idx;
2c49a44d 6394 i = (i + 1) & tp->rx_jmb_ring_mask) {
6395 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6396 TG3_RX_JMB_MAP_SZ);
6397 }
6398 }
6399
2b2cdb65 6400 return;
b196c7e4 6401 }
1da177e4 6402
2c49a44d 6403 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6404 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6405 tp->rx_pkt_map_sz);
1da177e4 6406
63c3a66f 6407 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
2c49a44d 6408 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6409 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6410 TG3_RX_JMB_MAP_SZ);
6411 }
6412}
6413
c6cdf436 6414/* Initialize rx rings for packet processing.
6415 *
6416 * The chip has been shut down and the driver detached from
6417 * the networking, so no interrupts or new tx packets will
6418 * end up in the driver. tp->{tx,}lock are held and thus
6419 * we may not sleep.
6420 */
6421static int tg3_rx_prodring_alloc(struct tg3 *tp,
6422 struct tg3_rx_prodring_set *tpr)
1da177e4 6423{
287be12e 6424 u32 i, rx_pkt_dma_sz;
1da177e4 6425
6426 tpr->rx_std_cons_idx = 0;
6427 tpr->rx_std_prod_idx = 0;
6428 tpr->rx_jmb_cons_idx = 0;
6429 tpr->rx_jmb_prod_idx = 0;
6430
8fea32b9 6431 if (tpr != &tp->napi[0].prodring) {
6432 memset(&tpr->rx_std_buffers[0], 0,
6433 TG3_RX_STD_BUFF_RING_SIZE(tp));
48035728 6434 if (tpr->rx_jmb_buffers)
2b2cdb65 6435 memset(&tpr->rx_jmb_buffers[0], 0,
2c49a44d 6436 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6437 goto done;
6438 }
6439
1da177e4 6440 /* Zero out all descriptors. */
2c49a44d 6441 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
1da177e4 6442
287be12e 6443 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
63c3a66f 6444 if (tg3_flag(tp, 5780_CLASS) &&
6445 tp->dev->mtu > ETH_DATA_LEN)
6446 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6447 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7e72aad4 6448
6449 /* Initialize invariants of the rings; we only set this
6450 * stuff once. This works because the card does not
6451 * write into the rx buffer posting rings.
6452 */
2c49a44d 6453 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6454 struct tg3_rx_buffer_desc *rxd;
6455
21f581a5 6456 rxd = &tpr->rx_std[i];
287be12e 6457 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6458 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6459 rxd->opaque = (RXD_OPAQUE_RING_STD |
6460 (i << RXD_OPAQUE_INDEX_SHIFT));
6461 }
6462
6463 /* Now allocate fresh SKBs for each rx ring. */
6464 for (i = 0; i < tp->rx_pending; i++) {
86b21e59 6465 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6466 netdev_warn(tp->dev,
6467 "Using a smaller RX standard ring. Only "
6468 "%d out of %d buffers were allocated "
6469 "successfully\n", i, tp->rx_pending);
32d8c572 6470 if (i == 0)
cf7a7298 6471 goto initfail;
32d8c572 6472 tp->rx_pending = i;
1da177e4 6473 break;
32d8c572 6474 }
6475 }
6476
63c3a66f 6477 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6478 goto done;
6479
2c49a44d 6480 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
cf7a7298 6481
63c3a66f 6482 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
0d86df80 6483 goto done;
cf7a7298 6484
2c49a44d 6485 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6486 struct tg3_rx_buffer_desc *rxd;
6487
6488 rxd = &tpr->rx_jmb[i].std;
6489 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6490 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6491 RXD_FLAG_JUMBO;
6492 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6493 (i << RXD_OPAQUE_INDEX_SHIFT));
6494 }
6495
6496 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6497 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6498 netdev_warn(tp->dev,
6499 "Using a smaller RX jumbo ring. Only %d "
6500 "out of %d buffers were allocated "
6501 "successfully\n", i, tp->rx_jumbo_pending);
6502 if (i == 0)
6503 goto initfail;
6504 tp->rx_jumbo_pending = i;
6505 break;
6506 }
6507 }
6508
6509done:
32d8c572 6510 return 0;
6511
6512initfail:
21f581a5 6513 tg3_rx_prodring_free(tp, tpr);
cf7a7298 6514 return -ENOMEM;
6515}
6516
6517static void tg3_rx_prodring_fini(struct tg3 *tp,
6518 struct tg3_rx_prodring_set *tpr)
1da177e4 6519{
6520 kfree(tpr->rx_std_buffers);
6521 tpr->rx_std_buffers = NULL;
6522 kfree(tpr->rx_jmb_buffers);
6523 tpr->rx_jmb_buffers = NULL;
6524 if (tpr->rx_std) {
6525 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6526 tpr->rx_std, tpr->rx_std_mapping);
21f581a5 6527 tpr->rx_std = NULL;
1da177e4 6528 }
21f581a5 6529 if (tpr->rx_jmb) {
6530 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6531 tpr->rx_jmb, tpr->rx_jmb_mapping);
21f581a5 6532 tpr->rx_jmb = NULL;
1da177e4 6533 }
6534}
6535
6536static int tg3_rx_prodring_init(struct tg3 *tp,
6537 struct tg3_rx_prodring_set *tpr)
cf7a7298 6538{
6539 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6540 GFP_KERNEL);
21f581a5 6541 if (!tpr->rx_std_buffers)
6542 return -ENOMEM;
6543
6544 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6545 TG3_RX_STD_RING_BYTES(tp),
6546 &tpr->rx_std_mapping,
6547 GFP_KERNEL);
21f581a5 6548 if (!tpr->rx_std)
6549 goto err_out;
6550
63c3a66f 6551 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
2c49a44d 6552 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6553 GFP_KERNEL);
6554 if (!tpr->rx_jmb_buffers)
6555 goto err_out;
6556
6557 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6558 TG3_RX_JMB_RING_BYTES(tp),
6559 &tpr->rx_jmb_mapping,
6560 GFP_KERNEL);
21f581a5 6561 if (!tpr->rx_jmb)
6562 goto err_out;
6563 }
6564
6565 return 0;
6566
6567err_out:
21f581a5 6568 tg3_rx_prodring_fini(tp, tpr);
6569 return -ENOMEM;
6570}
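
/*
 * Illustrative sketch, not driver code: tg3_rx_prodring_init() uses
 * the common "allocate in order, jump to one unwind label on failure"
 * idiom.  The unwind (tg3_rx_prodring_fini() above) tolerates a
 * partially built object because kfree(NULL) is a no-op and the DMA
 * frees are guarded by NULL checks.  Generic shape of the idiom (the
 * example_* names and sizes are hypothetical):
 */
struct example_rings {
	struct device *dev;
	void *buffers;
	void *ring;
	dma_addr_t ring_dma;
};

static void example_rings_fini(struct example_rings *c);

static int example_rings_init(struct example_rings *c)
{
	c->buffers = kzalloc(4096, GFP_KERNEL);
	if (!c->buffers)
		return -ENOMEM;

	c->ring = dma_alloc_coherent(c->dev, 4096, &c->ring_dma,
				     GFP_KERNEL);
	if (!c->ring)
		goto err_out;

	return 0;

err_out:
	example_rings_fini(c);	/* frees whatever was allocated */
	return -ENOMEM;
}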
6571
6572/* Free up pending packets in all rx/tx rings.
6573 *
6574 * The chip has been shut down and the driver detached from
6575 * the networking, so no interrupts or new tx packets will
6576 * end up in the driver. tp->{tx,}lock is not held and we are not
6577 * in an interrupt context and thus may sleep.
6578 */
6579static void tg3_free_rings(struct tg3 *tp)
6580{
f77a6a8e 6581 int i, j;
cf7a7298 6582
6583 for (j = 0; j < tp->irq_cnt; j++) {
6584 struct tg3_napi *tnapi = &tp->napi[j];
cf7a7298 6585
8fea32b9 6586 tg3_rx_prodring_free(tp, &tnapi->prodring);
b28f6428 6587
6588 if (!tnapi->tx_buffers)
6589 continue;
6590
f77a6a8e 6591 for (i = 0; i < TG3_TX_RING_SIZE; ) {
f4188d8a 6592 struct ring_info *txp;
f77a6a8e 6593 struct sk_buff *skb;
f4188d8a 6594 unsigned int k;
cf7a7298 6595
6596 txp = &tnapi->tx_buffers[i];
6597 skb = txp->skb;
cf7a7298 6598
6599 if (skb == NULL) {
6600 i++;
6601 continue;
6602 }
cf7a7298 6603
f4188d8a 6604 pci_unmap_single(tp->pdev,
4e5e4f0d 6605 dma_unmap_addr(txp, mapping),
6606 skb_headlen(skb),
6607 PCI_DMA_TODEVICE);
f77a6a8e 6608 txp->skb = NULL;
cf7a7298 6609
6610 i++;
6611
6612 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6613 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6614 pci_unmap_page(tp->pdev,
4e5e4f0d 6615 dma_unmap_addr(txp, mapping),
6616 skb_shinfo(skb)->frags[k].size,
6617 PCI_DMA_TODEVICE);
6618 i++;
6619 }
6620
6621 dev_kfree_skb_any(skb);
6622 }
2b2cdb65 6623 }
6624}
6625
6626/* Initialize tx/rx rings for packet processing.
6627 *
6628 * The chip has been shut down and the driver detached from
6629 * the networking, so no interrupts or new tx packets will
6630 * end up in the driver. tp->{tx,}lock are held and thus
6631 * we may not sleep.
6632 */
6633static int tg3_init_rings(struct tg3 *tp)
6634{
f77a6a8e 6635 int i;
72334482 6636
6637 /* Free up all the SKBs. */
6638 tg3_free_rings(tp);
6639
6640 for (i = 0; i < tp->irq_cnt; i++) {
6641 struct tg3_napi *tnapi = &tp->napi[i];
6642
6643 tnapi->last_tag = 0;
6644 tnapi->last_irq_tag = 0;
6645 tnapi->hw_status->status = 0;
6646 tnapi->hw_status->status_tag = 0;
6647 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
cf7a7298 6648
6649 tnapi->tx_prod = 0;
6650 tnapi->tx_cons = 0;
6651 if (tnapi->tx_ring)
6652 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6653
6654 tnapi->rx_rcb_ptr = 0;
6655 if (tnapi->rx_rcb)
6656 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
2b2cdb65 6657
8fea32b9 6658 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
e4af1af9 6659 tg3_free_rings(tp);
2b2cdb65 6660 return -ENOMEM;
e4af1af9 6661 }
f77a6a8e 6662 }
72334482 6663
2b2cdb65 6664 return 0;
6665}
6666
6667/*
6668 * Must not be invoked with interrupt sources disabled and
6669 * the hardware shut down.
6670 */
6671static void tg3_free_consistent(struct tg3 *tp)
6672{
f77a6a8e 6673 int i;
898a56f8 6674
6675 for (i = 0; i < tp->irq_cnt; i++) {
6676 struct tg3_napi *tnapi = &tp->napi[i];
6677
6678 if (tnapi->tx_ring) {
4bae65c8 6679 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6680 tnapi->tx_ring, tnapi->tx_desc_mapping);
6681 tnapi->tx_ring = NULL;
6682 }
6683
6684 kfree(tnapi->tx_buffers);
6685 tnapi->tx_buffers = NULL;
6686
6687 if (tnapi->rx_rcb) {
6688 dma_free_coherent(&tp->pdev->dev,
6689 TG3_RX_RCB_RING_BYTES(tp),
6690 tnapi->rx_rcb,
6691 tnapi->rx_rcb_mapping);
6692 tnapi->rx_rcb = NULL;
6693 }
6694
6695 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6696
f77a6a8e 6697 if (tnapi->hw_status) {
6698 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6699 tnapi->hw_status,
6700 tnapi->status_mapping);
6701 tnapi->hw_status = NULL;
6702 }
1da177e4 6703 }
f77a6a8e 6704
1da177e4 6705 if (tp->hw_stats) {
6706 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6707 tp->hw_stats, tp->stats_mapping);
6708 tp->hw_stats = NULL;
6709 }
6710}
6711
6712/*
6713 * Must not be invoked with interrupt sources disabled and
6714 * the hardware shut down. Can sleep.
6715 */
6716static int tg3_alloc_consistent(struct tg3 *tp)
6717{
f77a6a8e 6718 int i;
898a56f8 6719
6720 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6721 sizeof(struct tg3_hw_stats),
6722 &tp->stats_mapping,
6723 GFP_KERNEL);
f77a6a8e 6724 if (!tp->hw_stats)
1da177e4
LT
6725 goto err_out;
6726
f77a6a8e 6727 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
1da177e4 6728
6729 for (i = 0; i < tp->irq_cnt; i++) {
6730 struct tg3_napi *tnapi = &tp->napi[i];
8d9d7cfc 6731 struct tg3_hw_status *sblk;
1da177e4 6732
6733 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6734 TG3_HW_STATUS_SIZE,
6735 &tnapi->status_mapping,
6736 GFP_KERNEL);
6737 if (!tnapi->hw_status)
6738 goto err_out;
898a56f8 6739
f77a6a8e 6740 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6741 sblk = tnapi->hw_status;
6742
6743 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6744 goto err_out;
6745
19cfaecc
MC
6746 /* If multivector TSS is enabled, vector 0 does not handle
6747 * tx interrupts. Don't allocate any resources for it.
6748 */
6749 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6750 (i && tg3_flag(tp, ENABLE_TSS))) {
6751 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6752 TG3_TX_RING_SIZE,
6753 GFP_KERNEL);
6754 if (!tnapi->tx_buffers)
6755 goto err_out;
6756
6757 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6758 TG3_TX_RING_BYTES,
6759 &tnapi->tx_desc_mapping,
6760 GFP_KERNEL);
6761 if (!tnapi->tx_ring)
6762 goto err_out;
6763 }
6764
6765 /*
6766 * When RSS is enabled, the status block format changes
6767 * slightly. The "rx_jumbo_consumer", "reserved",
6768 * and "rx_mini_consumer" members get mapped to the
6769 * other three rx return ring producer indexes.
6770 */
6771 switch (i) {
6772 default:
6773 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6774 break;
6775 case 2:
6776 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6777 break;
6778 case 3:
6779 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6780 break;
6781 case 4:
6782 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6783 break;
6784 }
72334482 6785
6786 /*
6787 * If multivector RSS is enabled, vector 0 does not handle
6788 * rx or tx interrupts. Don't allocate any resources for it.
6789 */
63c3a66f 6790 if (!i && tg3_flag(tp, ENABLE_RSS))
6791 continue;
6792
6793 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6794 TG3_RX_RCB_RING_BYTES(tp),
6795 &tnapi->rx_rcb_mapping,
6796 GFP_KERNEL);
6797 if (!tnapi->rx_rcb)
6798 goto err_out;
72334482 6799
f77a6a8e 6800 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
f77a6a8e 6801 }
6802
6803 return 0;
6804
6805err_out:
6806 tg3_free_consistent(tp);
6807 return -ENOMEM;
6808}
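
/*
 * The switch above re-purposes otherwise-unused status block fields as
 * extra RX return-ring producer indexes when RSS is active.  The same
 * mapping written as a helper, purely to make the table explicit
 * (illustrative refactoring, not driver code):
 *
 *	vector 1 (and default) -> idx[0].rx_producer
 *	vector 2               -> rx_jumbo_consumer
 *	vector 3               -> reserved
 *	vector 4               -> rx_mini_consumer
 */
static u32 *example_rss_prod_idx(struct tg3_hw_status *sblk, int vec)
{
	switch (vec) {
	case 2:
		return &sblk->rx_jumbo_consumer;
	case 3:
		return &sblk->reserved;
	case 4:
		return &sblk->rx_mini_consumer;
	default:
		return &sblk->idx[0].rx_producer;
	}
}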
6809
6810#define MAX_WAIT_CNT 1000
6811
6812/* To stop a block, clear the enable bit and poll till it
6813 * clears. tp->lock is held.
6814 */
b3b7d6be 6815static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6816{
6817 unsigned int i;
6818 u32 val;
6819
63c3a66f 6820 if (tg3_flag(tp, 5705_PLUS)) {
6821 switch (ofs) {
6822 case RCVLSC_MODE:
6823 case DMAC_MODE:
6824 case MBFREE_MODE:
6825 case BUFMGR_MODE:
6826 case MEMARB_MODE:
6827 /* We can't enable/disable these bits of the
6828 * 5705/5750, just say success.
6829 */
6830 return 0;
6831
6832 default:
6833 break;
855e1111 6834 }
6835 }
6836
6837 val = tr32(ofs);
6838 val &= ~enable_bit;
6839 tw32_f(ofs, val);
6840
6841 for (i = 0; i < MAX_WAIT_CNT; i++) {
6842 udelay(100);
6843 val = tr32(ofs);
6844 if ((val & enable_bit) == 0)
6845 break;
6846 }
6847
b3b7d6be 6848 if (i == MAX_WAIT_CNT && !silent) {
6849 dev_err(&tp->pdev->dev,
6850 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6851 ofs, enable_bit);
6852 return -ENODEV;
6853 }
6854
6855 return 0;
6856}
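
/*
 * Illustrative reduction of tg3_stop_block() to its core: the bounded
 * "clear-and-poll" shutdown idiom.  Drop the enable bit, then spin in
 * 100 usec steps (MAX_WAIT_CNT of them, ~100 ms total) until the
 * hardware confirms the engine stopped.  Uses the same tg3 register
 * accessors as above:
 */
static int example_stop_and_wait(struct tg3 *tp, unsigned long ofs,
				 u32 enable_bit)
{
	unsigned int i;

	tw32_f(ofs, tr32(ofs) & ~enable_bit);	/* request the stop */

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(ofs) & enable_bit))
			return 0;		/* block really stopped */
	}
	return -ENODEV;				/* hardware never stopped */
}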
6857
6858/* tp->lock is held. */
b3b7d6be 6859static int tg3_abort_hw(struct tg3 *tp, int silent)
6860{
6861 int i, err;
6862
6863 tg3_disable_ints(tp);
6864
6865 tp->rx_mode &= ~RX_MODE_ENABLE;
6866 tw32_f(MAC_RX_MODE, tp->rx_mode);
6867 udelay(10);
6868
6869 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6870 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6871 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6872 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6873 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6874 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6875
6876 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6877 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6878 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6879 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6880 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6881 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6882 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6883
6884 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6885 tw32_f(MAC_MODE, tp->mac_mode);
6886 udelay(40);
6887
6888 tp->tx_mode &= ~TX_MODE_ENABLE;
6889 tw32_f(MAC_TX_MODE, tp->tx_mode);
6890
6891 for (i = 0; i < MAX_WAIT_CNT; i++) {
6892 udelay(100);
6893 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6894 break;
6895 }
6896 if (i >= MAX_WAIT_CNT) {
6897 dev_err(&tp->pdev->dev,
6898 "%s timed out, TX_MODE_ENABLE will not clear "
6899 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
e6de8ad1 6900 err |= -ENODEV;
6901 }
6902
e6de8ad1 6903 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6904 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6905 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6906
6907 tw32(FTQ_RESET, 0xffffffff);
6908 tw32(FTQ_RESET, 0x00000000);
6909
6910 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6911 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
1da177e4 6912
6913 for (i = 0; i < tp->irq_cnt; i++) {
6914 struct tg3_napi *tnapi = &tp->napi[i];
6915 if (tnapi->hw_status)
6916 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6917 }
6918 if (tp->hw_stats)
6919 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6920
6921 return err;
6922}
6923
6924static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6925{
6926 int i;
6927 u32 apedata;
6928
dc6d0744 6929 /* NCSI does not support APE events */
63c3a66f 6930 if (tg3_flag(tp, APE_HAS_NCSI))
6931 return;
6932
6933 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6934 if (apedata != APE_SEG_SIG_MAGIC)
6935 return;
6936
6937 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
731fd79c 6938 if (!(apedata & APE_FW_STATUS_READY))
0d3031d9
MC
6939 return;
6940
6941 /* Wait for up to 1 millisecond for APE to service previous event. */
6942 for (i = 0; i < 10; i++) {
6943 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6944 return;
6945
6946 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6947
6948 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6949 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6950 event | APE_EVENT_STATUS_EVENT_PENDING);
6951
6952 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6953
6954 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6955 break;
6956
6957 udelay(100);
6958 }
6959
6960 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6961 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6962}
6963
6964static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6965{
6966 u32 event;
6967 u32 apedata;
6968
63c3a66f 6969 if (!tg3_flag(tp, ENABLE_APE))
6970 return;
6971
6972 switch (kind) {
6973 case RESET_KIND_INIT:
6974 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6975 APE_HOST_SEG_SIG_MAGIC);
6976 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6977 APE_HOST_SEG_LEN_MAGIC);
6978 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6979 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6980 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6867c843 6981 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6982 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6983 APE_HOST_BEHAV_NO_PHYLOCK);
6984 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6985 TG3_APE_HOST_DRVR_STATE_START);
6986
6987 event = APE_EVENT_STATUS_STATE_START;
6988 break;
6989 case RESET_KIND_SHUTDOWN:
6990 /* With the interface we are currently using,
6991 * APE does not track driver state. Wiping
6992 * out the HOST SEGMENT SIGNATURE forces
6993 * the APE to assume OS absent status.
6994 */
6995 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
b2aee154 6996
dc6d0744 6997 if (device_may_wakeup(&tp->pdev->dev) &&
63c3a66f 6998 tg3_flag(tp, WOL_ENABLE)) {
6999 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7000 TG3_APE_HOST_WOL_SPEED_AUTO);
7001 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7002 } else
7003 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7004
7005 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7006
7007 event = APE_EVENT_STATUS_STATE_UNLOAD;
7008 break;
7009 case RESET_KIND_SUSPEND:
7010 event = APE_EVENT_STATUS_STATE_SUSPEND;
7011 break;
7012 default:
7013 return;
7014 }
7015
7016 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7017
7018 tg3_ape_send_event(tp, event);
7019}
7020
7021/* tp->lock is held. */
7022static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7023{
7024 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7025 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1da177e4 7026
63c3a66f 7027 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7028 switch (kind) {
7029 case RESET_KIND_INIT:
7030 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7031 DRV_STATE_START);
7032 break;
7033
7034 case RESET_KIND_SHUTDOWN:
7035 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7036 DRV_STATE_UNLOAD);
7037 break;
7038
7039 case RESET_KIND_SUSPEND:
7040 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7041 DRV_STATE_SUSPEND);
7042 break;
7043
7044 default:
7045 break;
855e1111 7046 }
1da177e4 7047 }
7048
7049 if (kind == RESET_KIND_INIT ||
7050 kind == RESET_KIND_SUSPEND)
7051 tg3_ape_driver_state_change(tp, kind);
7052}
7053
7054/* tp->lock is held. */
7055static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7056{
63c3a66f 7057 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7058 switch (kind) {
7059 case RESET_KIND_INIT:
7060 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7061 DRV_STATE_START_DONE);
7062 break;
7063
7064 case RESET_KIND_SHUTDOWN:
7065 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7066 DRV_STATE_UNLOAD_DONE);
7067 break;
7068
7069 default:
7070 break;
855e1111 7071 }
1da177e4 7072 }
7073
7074 if (kind == RESET_KIND_SHUTDOWN)
7075 tg3_ape_driver_state_change(tp, kind);
7076}
7077
7078/* tp->lock is held. */
7079static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7080{
63c3a66f 7081 if (tg3_flag(tp, ENABLE_ASF)) {
1da177e4
LT
7082 switch (kind) {
7083 case RESET_KIND_INIT:
7084 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7085 DRV_STATE_START);
7086 break;
7087
7088 case RESET_KIND_SHUTDOWN:
7089 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7090 DRV_STATE_UNLOAD);
7091 break;
7092
7093 case RESET_KIND_SUSPEND:
7094 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7095 DRV_STATE_SUSPEND);
7096 break;
7097
7098 default:
7099 break;
855e1111 7100 }
7101 }
7102}
7103
7104static int tg3_poll_fw(struct tg3 *tp)
7105{
7106 int i;
7107 u32 val;
7108
b5d3772c 7109 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7110 /* Wait up to 20ms for init done. */
7111 for (i = 0; i < 200; i++) {
7112 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7113 return 0;
7114 udelay(100);
7115 }
7116 return -ENODEV;
7117 }
7118
7119 /* Wait for firmware initialization to complete. */
7120 for (i = 0; i < 100000; i++) {
7121 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7122 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7123 break;
7124 udelay(10);
7125 }
7126
7127 /* Chip might not be fitted with firmware. Some Sun onboard
7128 * parts are configured like that. So don't signal the timeout
7129 * of the above loop as an error, but do report the lack of
7130 * running firmware once.
7131 */
7132 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7133 tg3_flag_set(tp, NO_FWARE_REPORTED);
7134
7135 netdev_info(tp->dev, "No firmware running\n");
7136 }
7137
7138 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7139 /* The 57765 A0 needs a little more
7140 * time to do some important work.
7141 */
7142 mdelay(10);
7143 }
7144
7145 return 0;
7146}
7147
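/* The handshake tg3_poll_fw() relies on: the driver writes
 * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 before reset (see
 * tg3_write_sig_pre_reset) and the bootcode writes back the bitwise
 * complement of that magic when its init completes, hence the poll
 * for ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1.  The 5906 has no such mailbox
 * and is polled via the VCPU_STATUS_INIT_DONE bit instead.
 */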
7148/* Save PCI command register before chip reset */
7149static void tg3_save_pci_state(struct tg3 *tp)
7150{
7151 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7152}
7153
7154/* Restore PCI state after chip reset */
7155static void tg3_restore_pci_state(struct tg3 *tp)
7156{
7157 u32 val;
7158
7159 /* Re-enable indirect register accesses. */
7160 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7161 tp->misc_host_ctrl);
7162
7163 /* Set MAX PCI retry to zero. */
7164 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7165 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7166 tg3_flag(tp, PCIX_MODE))
7167 val |= PCISTATE_RETRY_SAME_DMA;
7168 /* Allow reads and writes to the APE register and memory space. */
7169 if (tg3_flag(tp, ENABLE_APE))
7170 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7171 PCISTATE_ALLOW_APE_SHMEM_WR |
7172 PCISTATE_ALLOW_APE_PSPACE_WR;
7173 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7174
7175 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7176
7177 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7178 if (tg3_flag(tp, PCI_EXPRESS))
7179 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7180 else {
7181 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7182 tp->pci_cacheline_sz);
7183 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7184 tp->pci_lat_timer);
7185 }
7186 }
7187
7188 /* Make sure PCI-X relaxed ordering bit is clear. */
7189 if (tg3_flag(tp, PCIX_MODE)) {
7190 u16 pcix_cmd;
7191
7192 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7193 &pcix_cmd);
7194 pcix_cmd &= ~PCI_X_CMD_ERO;
7195 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7196 pcix_cmd);
7197 }
7198
7199 if (tg3_flag(tp, 5780_CLASS)) {
7200
7201 /* Chip reset on 5780 will reset MSI enable bit,
7202 * so need to restore it.
7203 */
7204 if (tg3_flag(tp, USING_MSI)) {
7205 u16 ctrl;
7206
7207 pci_read_config_word(tp->pdev,
7208 tp->msi_cap + PCI_MSI_FLAGS,
7209 &ctrl);
7210 pci_write_config_word(tp->pdev,
7211 tp->msi_cap + PCI_MSI_FLAGS,
7212 ctrl | PCI_MSI_FLAGS_ENABLE);
7213 val = tr32(MSGINT_MODE);
7214 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7215 }
7216 }
7217}
7218
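/* Between them, tg3_save_pci_state() and tg3_restore_pci_state()
 * preserve the config-space state that a GRC core-clock reset is
 * known to clobber: the PCI command word, the indirect-access enables
 * in TG3PCI_MISC_HOST_CTRL, the PCISTATE retry/APE-access bits, the
 * cacheline size and latency timer (or the PCIe read request size),
 * PCI-X relaxed ordering, and the MSI enable bit on 5780-class chips.
 */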
7219static void tg3_stop_fw(struct tg3 *);
7220
7221/* tp->lock is held. */
7222static int tg3_chip_reset(struct tg3 *tp)
7223{
7224 u32 val;
1ee582d8 7225 void (*write_op)(struct tg3 *, u32, u32);
4f125f42 7226 int i, err;
7227
7228 tg3_nvram_lock(tp);
7229
7230 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7231
7232 /* No matching tg3_nvram_unlock() after this because
7233 * chip reset below will undo the nvram lock.
7234 */
7235 tp->nvram_lock_cnt = 0;
7236
7237 /* GRC_MISC_CFG core clock reset will clear the memory
7238 * enable bit in PCI register 4 and the MSI enable bit
7239 * on some chips, so we save relevant registers here.
7240 */
7241 tg3_save_pci_state(tp);
7242
7243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7244 tg3_flag(tp, 5755_PLUS))
7245 tw32(GRC_FASTBOOT_PC, 0);
7246
7247 /*
7248 * We must avoid the readl() that normally takes place.
7249 * It locks machines, causes machine checks, and other
7250 * fun things. So, temporarily disable the 5701
7251 * hardware workaround, while we do the reset.
7252 */
7253 write_op = tp->write32;
7254 if (write_op == tg3_write_flush_reg32)
7255 tp->write32 = tg3_write32;
7256
7257 /* Prevent the irq handler from reading or writing PCI registers
7258 * during chip reset when the memory enable bit in the PCI command
7259 * register may be cleared. The chip does not generate interrupt
7260 * at this time, but the irq handler may still be called due to irq
7261 * sharing or irqpoll.
7262 */
7263 tg3_flag_set(tp, CHIP_RESETTING);
7264 for (i = 0; i < tp->irq_cnt; i++) {
7265 struct tg3_napi *tnapi = &tp->napi[i];
7266 if (tnapi->hw_status) {
7267 tnapi->hw_status->status = 0;
7268 tnapi->hw_status->status_tag = 0;
7269 }
7270 tnapi->last_tag = 0;
7271 tnapi->last_irq_tag = 0;
7272 }
7273 smp_mb();
7274
7275 for (i = 0; i < tp->irq_cnt; i++)
7276 synchronize_irq(tp->napi[i].irq_vec);
7277
7278 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7279 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7280 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7281 }
7282
7283 /* do the reset */
7284 val = GRC_MISC_CFG_CORECLK_RESET;
7285
7286 if (tg3_flag(tp, PCI_EXPRESS)) {
7287 /* Force PCIe 1.0a mode */
7288 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7289 !tg3_flag(tp, 57765_PLUS) &&
7290 tr32(TG3_PCIE_PHY_TSTCTL) ==
7291 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7292 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7293
7294 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7295 tw32(GRC_MISC_CFG, (1 << 29));
7296 val |= (1 << 29);
7297 }
7298 }
7299
7300 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7301 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7302 tw32(GRC_VCPU_EXT_CTRL,
7303 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7304 }
7305
7306 /* Manage gphy power for all CPMU absent PCIe devices. */
7307 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7308 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7309
7310 tw32(GRC_MISC_CFG, val);
7311
7312 /* restore 5701 hardware bug workaround write method */
7313 tp->write32 = write_op;
7314
7315 /* Unfortunately, we have to delay before the PCI read back.
7316 * Some 575X chips even will not respond to a PCI cfg access
7317 * when the reset command is given to the chip.
7318 *
7319 * How do these hardware designers expect things to work
7320 * properly if the PCI write is posted for a long period
7321 * of time? It is always necessary to have some method by
7322 * which a register read back can occur to push the write
7323 * out which does the reset.
7324 *
7325 * For most tg3 variants the trick below was working.
7326 * Ho hum...
7327 */
7328 udelay(120);
7329
7330 /* Flush PCI posted writes. The normal MMIO registers
7331 * are inaccessible at this time so this is the only
7332 * way to make this reliably (actually, this is no longer
7333 * the case, see above). I tried to use indirect
7334 * register read/write but this upset some 5701 variants.
7335 */
7336 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7337
7338 udelay(120);
7339
7340 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7341 u16 val16;
7342
7343 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7344 int i;
7345 u32 cfg_val;
7346
7347 /* Wait for link training to complete. */
7348 for (i = 0; i < 5000; i++)
7349 udelay(100);
7350
7351 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7352 pci_write_config_dword(tp->pdev, 0xc4,
7353 cfg_val | (1 << 15));
7354 }
7355
7356 /* Clear the "no snoop" and "relaxed ordering" bits. */
7357 pci_read_config_word(tp->pdev,
7358 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7359 &val16);
7360 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7361 PCI_EXP_DEVCTL_NOSNOOP_EN);
7362 /*
7363 * Older PCIe devices only support the 128 byte
7364 * MPS setting. Enforce the restriction.
7365 */
7366 if (!tg3_flag(tp, CPMU_PRESENT))
7367 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7368 pci_write_config_word(tp->pdev,
7369 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7370 val16);
7371
7372 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7373
7374 /* Clear error status */
7375 pci_write_config_word(tp->pdev,
7376 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7377 PCI_EXP_DEVSTA_CED |
7378 PCI_EXP_DEVSTA_NFED |
7379 PCI_EXP_DEVSTA_FED |
7380 PCI_EXP_DEVSTA_URD);
7381 }
7382
7383 tg3_restore_pci_state(tp);
7384
7385 tg3_flag_clear(tp, CHIP_RESETTING);
7386 tg3_flag_clear(tp, ERROR_PROCESSED);
7387
7388 val = 0;
7389 if (tg3_flag(tp, 5780_CLASS))
7390 val = tr32(MEMARB_MODE);
7391 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7392
7393 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7394 tg3_stop_fw(tp);
7395 tw32(0x5000, 0x400);
7396 }
7397
7398 tw32(GRC_MODE, tp->grc_mode);
7399
7400 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7401 val = tr32(0xc4);
7402
7403 tw32(0xc4, val | (1 << 15));
7404 }
7405
7406 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7407 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7408 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7409 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7410 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7411 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7412 }
7413
7414 if (tg3_flag(tp, ENABLE_APE))
7415 tp->mac_mode = MAC_MODE_APE_TX_EN |
7416 MAC_MODE_APE_RX_EN |
7417 MAC_MODE_TDE_ENABLE;
7418
7419 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7420 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7421 val = tp->mac_mode;
7422 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7423 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7424 val = tp->mac_mode;
7425 } else
7426 val = 0;
7427
7428 tw32_f(MAC_MODE, val);
7429 udelay(40);
7430
7431 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7432
7433 err = tg3_poll_fw(tp);
7434 if (err)
7435 return err;
7436
7437 tg3_mdio_start(tp);
7438
7439 if (tg3_flag(tp, PCI_EXPRESS) &&
7440 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7441 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7442 !tg3_flag(tp, 57765_PLUS)) {
7443 val = tr32(0x7c00);
7444
7445 tw32(0x7c00, val | (1 << 25));
7446 }
7447
7448 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7449 val = tr32(TG3_CPMU_CLCK_ORIDE);
7450 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7451 }
7452
7453 /* Reprobe ASF enable state. */
7454 tg3_flag_clear(tp, ENABLE_ASF);
7455 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7456 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7457 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7458 u32 nic_cfg;
7459
7460 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7461 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7462 tg3_flag_set(tp, ENABLE_ASF);
7463 tp->last_event_jiffies = jiffies;
7464 if (tg3_flag(tp, 5750_PLUS))
7465 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7466 }
7467 }
7468
7469 return 0;
7470}
7471
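/* tg3_chip_reset() in outline: take the NVRAM and GRC APE locks, save
 * PCI state, quiesce interrupt handling (CHIP_RESETTING), issue
 * GRC_MISC_CFG_CORECLK_RESET, ride out the window in which config
 * space does not respond, restore PCI state, re-enable the memory
 * arbiter, wait for bootcode via tg3_poll_fw(), and finally reprobe
 * the ASF enable state from NIC SRAM.
 */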
7472/* tp->lock is held. */
7473static void tg3_stop_fw(struct tg3 *tp)
7474{
7475 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7476 /* Wait for RX cpu to ACK the previous event. */
7477 tg3_wait_for_event_ack(tp);
7478
7479 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7480
7481 tg3_generate_fw_event(tp);
7482
7483 /* Wait for RX cpu to ACK this event. */
7484 tg3_wait_for_event_ack(tp);
7485 }
7486}
7487
7488/* tp->lock is held. */
7489 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7490{
7491 int err;
7492
7493 tg3_stop_fw(tp);
7494
7495 tg3_write_sig_pre_reset(tp, kind);
7496
7497 tg3_abort_hw(tp, silent);
7498 err = tg3_chip_reset(tp);
7499
7500 __tg3_set_mac_addr(tp, 0);
7501
7502 tg3_write_sig_legacy(tp, kind);
7503 tg3_write_sig_post_reset(tp, kind);
7504
7505 if (err)
7506 return err;
7507
7508 return 0;
7509}
7510
7511#define RX_CPU_SCRATCH_BASE 0x30000
7512#define RX_CPU_SCRATCH_SIZE 0x04000
7513#define TX_CPU_SCRATCH_BASE 0x34000
7514#define TX_CPU_SCRATCH_SIZE 0x04000
7515
7516/* tp->lock is held. */
7517static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7518{
7519 int i;
7520
7521 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7522
7523 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7524 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7525
7526 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7527 return 0;
7528 }
7529 if (offset == RX_CPU_BASE) {
7530 for (i = 0; i < 10000; i++) {
7531 tw32(offset + CPU_STATE, 0xffffffff);
7532 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7533 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7534 break;
7535 }
7536
7537 tw32(offset + CPU_STATE, 0xffffffff);
7538 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7539 udelay(10);
7540 } else {
7541 for (i = 0; i < 10000; i++) {
7542 tw32(offset + CPU_STATE, 0xffffffff);
7543 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7544 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7545 break;
7546 }
7547 }
7548
7549 if (i >= 10000) {
7550 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7551 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7552 return -ENODEV;
7553 }
7554
7555 /* Clear firmware's nvram arbitration. */
7556 if (tg3_flag(tp, NVRAM))
7557 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7558 return 0;
7559}
7560
7561struct fw_info {
7562 unsigned int fw_base;
7563 unsigned int fw_len;
7564 const __be32 *fw_data;
7565};
7566
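/* Layout of the firmware blobs consumed by the loaders below (per
 * their comments): three 32-bit big-endian header words -- version,
 * start address, and a length word that the driver recomputes from
 * the file size -- followed by the image, loaded contiguously from
 * the start address.  Hence fw_base = fw_data[1], the data pointer is
 * &fw_data[3], and fw_len = tp->fw->size - 12.
 */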
7567/* tp->lock is held. */
7568static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7569 int cpu_scratch_size, struct fw_info *info)
7570{
7571 int err, lock_err, i;
7572 void (*write_op)(struct tg3 *, u32, u32);
7573
7574 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7575 netdev_err(tp->dev,
7576 "%s: Trying to load TX cpu firmware which is 5705\n",
7577 __func__);
7578 return -EINVAL;
7579 }
7580
7581 if (tg3_flag(tp, 5705_PLUS))
7582 write_op = tg3_write_mem;
7583 else
7584 write_op = tg3_write_indirect_reg32;
7585
7586 /* It is possible that bootcode is still loading at this point.
7587 * Get the nvram lock first before halting the cpu.
7588 */
7589 lock_err = tg3_nvram_lock(tp);
7590 err = tg3_halt_cpu(tp, cpu_base);
7591 if (!lock_err)
7592 tg3_nvram_unlock(tp);
7593 if (err)
7594 goto out;
7595
7596 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7597 write_op(tp, cpu_scratch_base + i, 0);
7598 tw32(cpu_base + CPU_STATE, 0xffffffff);
7599 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7600 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7601 write_op(tp, (cpu_scratch_base +
7602 (info->fw_base & 0xffff) +
7603 (i * sizeof(u32))),
7604 be32_to_cpu(info->fw_data[i]));
7605
7606 err = 0;
7607
7608out:
7609 return err;
7610}
7611
7612/* tp->lock is held. */
7613static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7614{
7615 struct fw_info info;
7616 const __be32 *fw_data;
7617 int err, i;
7618
7619 fw_data = (void *)tp->fw->data;
7620
7621 /* Firmware blob starts with version numbers, followed by
7622 start address and length. We are setting complete length.
7623 length = end_address_of_bss - start_address_of_text.
7624 Remainder is the blob to be loaded contiguously
7625 from start address. */
7626
7627 info.fw_base = be32_to_cpu(fw_data[1]);
7628 info.fw_len = tp->fw->size - 12;
7629 info.fw_data = &fw_data[3];
7630
7631 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7632 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7633 &info);
7634 if (err)
7635 return err;
7636
7637 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7638 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7639 &info);
7640 if (err)
7641 return err;
7642
7643 /* Now startup only the RX cpu. */
7644 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7645 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7646
7647 for (i = 0; i < 5; i++) {
7648 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7649 break;
7650 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7651 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7652 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7653 udelay(1000);
7654 }
7655 if (i >= 5) {
7656 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7657 "should be %08x\n", __func__,
7658 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7659 return -ENODEV;
7660 }
7661 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7662 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7663
7664 return 0;
7665}
7666
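/* The 5701 A0 fix above loads the same patch image into both the RX
 * and TX CPUs but releases only the RX CPU, retrying up to five times
 * until CPU_PC confirms the core is executing at fw_base.
 */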
7667/* tp->lock is held. */
7668static int tg3_load_tso_firmware(struct tg3 *tp)
7669{
7670 struct fw_info info;
7671 const __be32 *fw_data;
7672 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7673 int err, i;
7674
7675 if (tg3_flag(tp, HW_TSO_1) ||
7676 tg3_flag(tp, HW_TSO_2) ||
7677 tg3_flag(tp, HW_TSO_3))
7678 return 0;
7679
7680 fw_data = (void *)tp->fw->data;
7681
7682 /* Firmware blob starts with version numbers, followed by
7683 start address and length. We are setting complete length.
7684 length = end_address_of_bss - start_address_of_text.
7685 Remainder is the blob to be loaded contiguously
7686 from start address. */
7687
7688 info.fw_base = be32_to_cpu(fw_data[1]);
7689 cpu_scratch_size = tp->fw_len;
7690 info.fw_len = tp->fw->size - 12;
7691 info.fw_data = &fw_data[3];
7692
7693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7694 cpu_base = RX_CPU_BASE;
7695 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7696 } else {
7697 cpu_base = TX_CPU_BASE;
7698 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7699 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7700 }
7701
7702 err = tg3_load_firmware_cpu(tp, cpu_base,
7703 cpu_scratch_base, cpu_scratch_size,
7704 &info);
7705 if (err)
7706 return err;
7707
7708 /* Now startup the cpu. */
7709 tw32(cpu_base + CPU_STATE, 0xffffffff);
7710 tw32_f(cpu_base + CPU_PC, info.fw_base);
7711
7712 for (i = 0; i < 5; i++) {
7713 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7714 break;
7715 tw32(cpu_base + CPU_STATE, 0xffffffff);
7716 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7717 tw32_f(cpu_base + CPU_PC, info.fw_base);
7718 udelay(1000);
7719 }
7720 if (i >= 5) {
7721 netdev_err(tp->dev,
7722 "%s fails to set CPU PC, is %08x should be %08x\n",
7723 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7724 return -ENODEV;
7725 }
7726 tw32(cpu_base + CPU_STATE, 0xffffffff);
7727 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7728 return 0;
7729}
7730
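/* tg3_load_tso_firmware() is a no-op on parts with hardware TSO
 * (HW_TSO_1/2/3).  Otherwise the TSO engine runs on the TX CPU,
 * except on the 5705 where it runs on the RX CPU with its scratch
 * space carved out of the mbuf pool at NIC_SRAM_MBUF_POOL_BASE5705.
 */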
7731
7732static int tg3_set_mac_addr(struct net_device *dev, void *p)
7733{
7734 struct tg3 *tp = netdev_priv(dev);
7735 struct sockaddr *addr = p;
7736 int err = 0, skip_mac_1 = 0;
7737
7738 if (!is_valid_ether_addr(addr->sa_data))
7739 return -EINVAL;
7740
7741 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7742
7743 if (!netif_running(dev))
7744 return 0;
7745
7746 if (tg3_flag(tp, ENABLE_ASF)) {
7747 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7748
7749 addr0_high = tr32(MAC_ADDR_0_HIGH);
7750 addr0_low = tr32(MAC_ADDR_0_LOW);
7751 addr1_high = tr32(MAC_ADDR_1_HIGH);
7752 addr1_low = tr32(MAC_ADDR_1_LOW);
7753
7754 /* Skip MAC addr 1 if ASF is using it. */
7755 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7756 !(addr1_high == 0 && addr1_low == 0))
7757 skip_mac_1 = 1;
7758 }
7759 spin_lock_bh(&tp->lock);
7760 __tg3_set_mac_addr(tp, skip_mac_1);
7761 spin_unlock_bh(&tp->lock);
7762
7763 return err;
7764}
7765
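/* When ASF firmware is active it may own MAC address register pair 1;
 * tg3_set_mac_addr() above compares the two register pairs and sets
 * skip_mac_1 so that __tg3_set_mac_addr() leaves the firmware's copy
 * alone.
 */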
7766/* tp->lock is held. */
7767static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7768 dma_addr_t mapping, u32 maxlen_flags,
7769 u32 nic_addr)
7770{
7771 tg3_write_mem(tp,
7772 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7773 ((u64) mapping >> 32));
7774 tg3_write_mem(tp,
7775 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7776 ((u64) mapping & 0xffffffff));
7777 tg3_write_mem(tp,
7778 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7779 maxlen_flags);
7780
7781 if (!tg3_flag(tp, 5705_PLUS))
7782 tg3_write_mem(tp,
7783 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7784 nic_addr);
7785}
7786
7787static void __tg3_set_rx_mode(struct net_device *);
7788 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7789 {
7790 int i;
7791
7792 if (!tg3_flag(tp, ENABLE_TSS)) {
7793 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7794 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7795 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7796 } else {
7797 tw32(HOSTCC_TXCOL_TICKS, 0);
7798 tw32(HOSTCC_TXMAX_FRAMES, 0);
7799 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7800 }
7801
7802 if (!tg3_flag(tp, ENABLE_RSS)) {
7803 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7804 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7805 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7806 } else {
7807 tw32(HOSTCC_RXCOL_TICKS, 0);
7808 tw32(HOSTCC_RXMAX_FRAMES, 0);
7809 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7810 }
7811
7812 if (!tg3_flag(tp, 5705_PLUS)) {
7813 u32 val = ec->stats_block_coalesce_usecs;
7814
7815 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7816 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7817
7818 if (!netif_carrier_ok(tp->dev))
7819 val = 0;
7820
7821 tw32(HOSTCC_STAT_COAL_TICKS, val);
7822 }
7823
7824 for (i = 0; i < tp->irq_cnt - 1; i++) {
7825 u32 reg;
7826
7827 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7828 tw32(reg, ec->rx_coalesce_usecs);
7829 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7830 tw32(reg, ec->rx_max_coalesced_frames);
7831 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7832 tw32(reg, ec->rx_max_coalesced_frames_irq);
7833
7834 if (tg3_flag(tp, ENABLE_TSS)) {
7835 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7836 tw32(reg, ec->tx_coalesce_usecs);
7837 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7838 tw32(reg, ec->tx_max_coalesced_frames);
7839 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7840 tw32(reg, ec->tx_max_coalesced_frames_irq);
7841 }
7842 }
7843
7844 for (; i < tp->irq_max - 1; i++) {
7845 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7846 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7847 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7848
7849 if (tg3_flag(tp, ENABLE_TSS)) {
7850 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7851 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7852 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7853 }
7854 }
7855 }
7856
7857/* tp->lock is held. */
7858static void tg3_rings_reset(struct tg3 *tp)
7859{
7860 int i;
7861 u32 stblk, txrcb, rxrcb, limit;
7862 struct tg3_napi *tnapi = &tp->napi[0];
7863
7864 /* Disable all transmit rings but the first. */
7865 if (!tg3_flag(tp, 5705_PLUS))
7866 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7867 else if (tg3_flag(tp, 5717_PLUS))
7868 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7869 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7870 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7871 else
7872 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7873
7874 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7875 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7876 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7877 BDINFO_FLAGS_DISABLED);
7878
7879
7880 /* Disable all receive return rings but the first. */
7881 if (tg3_flag(tp, 5717_PLUS))
7882 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7883 else if (!tg3_flag(tp, 5705_PLUS))
7884 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7885 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7886 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7887 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7888 else
7889 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7890
7891 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7892 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7893 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7894 BDINFO_FLAGS_DISABLED);
7895
7896 /* Disable interrupts */
7897 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7898 tp->napi[0].chk_msi_cnt = 0;
7899 tp->napi[0].last_rx_cons = 0;
7900 tp->napi[0].last_tx_cons = 0;
7901
7902 /* Zero mailbox registers. */
7903 if (tg3_flag(tp, SUPPORT_MSIX)) {
7904 for (i = 1; i < tp->irq_max; i++) {
7905 tp->napi[i].tx_prod = 0;
7906 tp->napi[i].tx_cons = 0;
7907 if (tg3_flag(tp, ENABLE_TSS))
7908 tw32_mailbox(tp->napi[i].prodmbox, 0);
7909 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7910 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7911 tp->napi[0].chk_msi_cnt = 0;
7912 tp->napi[i].last_rx_cons = 0;
7913 tp->napi[i].last_tx_cons = 0;
7914 }
7915 if (!tg3_flag(tp, ENABLE_TSS))
7916 tw32_mailbox(tp->napi[0].prodmbox, 0);
7917 } else {
7918 tp->napi[0].tx_prod = 0;
7919 tp->napi[0].tx_cons = 0;
7920 tw32_mailbox(tp->napi[0].prodmbox, 0);
7921 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7922 }
7923
7924 /* Make sure the NIC-based send BD rings are disabled. */
7925 if (!tg3_flag(tp, 5705_PLUS)) {
7926 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7927 for (i = 0; i < 16; i++)
7928 tw32_tx_mbox(mbox + i * 8, 0);
7929 }
7930
7931 txrcb = NIC_SRAM_SEND_RCB;
7932 rxrcb = NIC_SRAM_RCV_RET_RCB;
7933
7934 /* Clear status block in ram. */
7935 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7936
7937 /* Set status block DMA address */
7938 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7939 ((u64) tnapi->status_mapping >> 32));
7940 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7941 ((u64) tnapi->status_mapping & 0xffffffff));
7942
7943 if (tnapi->tx_ring) {
7944 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7945 (TG3_TX_RING_SIZE <<
7946 BDINFO_FLAGS_MAXLEN_SHIFT),
7947 NIC_SRAM_TX_BUFFER_DESC);
7948 txrcb += TG3_BDINFO_SIZE;
7949 }
7950
7951 if (tnapi->rx_rcb) {
7952 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7953 (tp->rx_ret_ring_mask + 1) <<
7954 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7955 rxrcb += TG3_BDINFO_SIZE;
7956 }
7957
7958 stblk = HOSTCC_STATBLCK_RING1;
7959
7960 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7961 u64 mapping = (u64)tnapi->status_mapping;
7962 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7963 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7964
7965 /* Clear status block in ram. */
7966 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7967
7968 if (tnapi->tx_ring) {
7969 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7970 (TG3_TX_RING_SIZE <<
7971 BDINFO_FLAGS_MAXLEN_SHIFT),
7972 NIC_SRAM_TX_BUFFER_DESC);
7973 txrcb += TG3_BDINFO_SIZE;
7974 }
7975
7976 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7977 ((tp->rx_ret_ring_mask + 1) <<
7978 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7979
7980 stblk += 8;
7981 rxrcb += TG3_BDINFO_SIZE;
7982 }
7983}
7984
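/* After tg3_rings_reset() only send ring 0 and receive return ring 0
 * remain live: every other ring control block is flagged
 * BDINFO_FLAGS_DISABLED, the mailboxes are zeroed, and each vector's
 * status block is cleared and re-pointed at its DMA mapping.
 */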
7985static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7986{
7987 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7988
7989 if (!tg3_flag(tp, 5750_PLUS) ||
7990 tg3_flag(tp, 5780_CLASS) ||
7991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7993 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7994 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7996 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7997 else
7998 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7999
8000 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8001 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8002
8003 val = min(nic_rep_thresh, host_rep_thresh);
8004 tw32(RCVBDI_STD_THRESH, val);
8005
8006 if (tg3_flag(tp, 57765_PLUS))
8007 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8008
8009 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8010 return;
8011
8012 if (!tg3_flag(tp, 5705_PLUS))
8013 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8014 else
8015 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8016
8017 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8018
8019 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8020 tw32(RCVBDI_JUMBO_THRESH, val);
8021
8022 if (tg3_flag(tp, 57765_PLUS))
8023 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8024}
8025
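/* Threshold math above: the NIC-side replenish threshold is
 * min(bdcache_maxcnt / 2, rx_std_max_post), the host-side one is
 * max(rx_pending / 8, 1), and the smaller of the two is programmed
 * into RCVBDI_STD_THRESH; 57765+ parts additionally pin the BD cache
 * low-water mark at bdcache_maxcnt.
 */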
8026 /* tp->lock is held. */
8027 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8028{
8029 u32 val, rdmac_mode;
8030 int i, err, limit;
8031 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8032
8033 tg3_disable_ints(tp);
8034
8035 tg3_stop_fw(tp);
8036
8037 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8038
8039 if (tg3_flag(tp, INIT_COMPLETE))
8040 tg3_abort_hw(tp, 1);
8041
8042 /* Enable MAC control of LPI */
8043 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8044 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8045 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8046 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8047
8048 tw32_f(TG3_CPMU_EEE_CTRL,
8049 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8050
8051 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8052 TG3_CPMU_EEEMD_LPI_IN_TX |
8053 TG3_CPMU_EEEMD_LPI_IN_RX |
8054 TG3_CPMU_EEEMD_EEE_ENABLE;
8055
8056 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8057 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8058
8059 if (tg3_flag(tp, ENABLE_APE))
8060 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8061
8062 tw32_f(TG3_CPMU_EEE_MODE, val);
8063
8064 tw32_f(TG3_CPMU_EEE_DBTMR1,
8065 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8066 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8067
8068 tw32_f(TG3_CPMU_EEE_DBTMR2,
8069 TG3_CPMU_DBTMR2_APE_TX_2047US |
8070 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8071 }
8072
8073 if (reset_phy)
8074 tg3_phy_reset(tp);
8075
8076 err = tg3_chip_reset(tp);
8077 if (err)
8078 return err;
8079
8080 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8081
8082 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8083 val = tr32(TG3_CPMU_CTRL);
8084 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8085 tw32(TG3_CPMU_CTRL, val);
8086
8087 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8088 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8089 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8090 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8091
8092 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8093 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8094 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8095 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8096
8097 val = tr32(TG3_CPMU_HST_ACC);
8098 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8099 val |= CPMU_HST_ACC_MACCLK_6_25;
8100 tw32(TG3_CPMU_HST_ACC, val);
8101 }
8102
8103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8104 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8105 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8106 PCIE_PWR_MGMT_L1_THRESH_4MS;
8107 tw32(PCIE_PWR_MGMT_THRESH, val);
8108
8109 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8110 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8111
8112 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8113
8114 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8115 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8116 }
8117
8118 if (tg3_flag(tp, L1PLLPD_EN)) {
8119 u32 grc_mode = tr32(GRC_MODE);
8120
8121 /* Access the lower 1K of PL PCIE block registers. */
8122 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8123 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8124
8125 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8126 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8127 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8128
8129 tw32(GRC_MODE, grc_mode);
8130 }
8131
8132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8133 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8134 u32 grc_mode = tr32(GRC_MODE);
8135
8136 /* Access the lower 1K of PL PCIE block registers. */
8137 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8138 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8139
8140 val = tr32(TG3_PCIE_TLDLPL_PORT +
8141 TG3_PCIE_PL_LO_PHYCTL5);
8142 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8143 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8144
8145 tw32(GRC_MODE, grc_mode);
8146 }
8147
8148 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8149 u32 grc_mode = tr32(GRC_MODE);
8150
8151 /* Access the lower 1K of DL PCIE block registers. */
8152 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8153 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8154
8155 val = tr32(TG3_PCIE_TLDLPL_PORT +
8156 TG3_PCIE_DL_LO_FTSMAX);
8157 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8158 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8159 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8160
8161 tw32(GRC_MODE, grc_mode);
8162 }
8163
8164 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8165 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8166 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8167 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8168 }
8169
8170 /* This works around an issue with Athlon chipsets on
8171 * B3 tigon3 silicon. This bit has no effect on any
8172 * other revision. But do not set this on PCI Express
8173 * chips and don't even touch the clocks if the CPMU is present.
8174 */
8175 if (!tg3_flag(tp, CPMU_PRESENT)) {
8176 if (!tg3_flag(tp, PCI_EXPRESS))
8177 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8178 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8179 }
8180
8181 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8182 tg3_flag(tp, PCIX_MODE)) {
8183 val = tr32(TG3PCI_PCISTATE);
8184 val |= PCISTATE_RETRY_SAME_DMA;
8185 tw32(TG3PCI_PCISTATE, val);
8186 }
8187
8188 if (tg3_flag(tp, ENABLE_APE)) {
8189 /* Allow reads and writes to the
8190 * APE register and memory space.
8191 */
8192 val = tr32(TG3PCI_PCISTATE);
8193 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8194 PCISTATE_ALLOW_APE_SHMEM_WR |
8195 PCISTATE_ALLOW_APE_PSPACE_WR;
8196 tw32(TG3PCI_PCISTATE, val);
8197 }
8198
8199 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8200 /* Enable some hw fixes. */
8201 val = tr32(TG3PCI_MSI_DATA);
8202 val |= (1 << 26) | (1 << 28) | (1 << 29);
8203 tw32(TG3PCI_MSI_DATA, val);
8204 }
8205
8206 /* Descriptor ring init may make accesses to the
8207 * NIC SRAM area to setup the TX descriptors, so we
8208 * can only do this after the hardware has been
8209 * successfully reset.
8210 */
8211 err = tg3_init_rings(tp);
8212 if (err)
8213 return err;
8214
8215 if (tg3_flag(tp, 57765_PLUS)) {
8216 val = tr32(TG3PCI_DMA_RW_CTRL) &
8217 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8218 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8219 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8220 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8221 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8222 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8223 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8224 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8225 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8226 /* This value is determined during the probe time DMA
8227 * engine test, tg3_test_dma.
8228 */
8229 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8230 }
8231
8232 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8233 GRC_MODE_4X_NIC_SEND_RINGS |
8234 GRC_MODE_NO_TX_PHDR_CSUM |
8235 GRC_MODE_NO_RX_PHDR_CSUM);
8236 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8237
8238 /* Pseudo-header checksum is done by hardware logic and not
8239 * the offload processers, so make the chip do the pseudo-
8240 * header checksums on receive. For transmit it is more
8241 * convenient to do the pseudo-header checksum in software
8242 * as Linux does that on transmit for us in all cases.
8243 */
8244 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8245
8246 tw32(GRC_MODE,
8247 tp->grc_mode |
8248 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8249
8250 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8251 val = tr32(GRC_MISC_CFG);
8252 val &= ~0xff;
8253 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8254 tw32(GRC_MISC_CFG, val);
8255
8256 /* Initialize MBUF/DESC pool. */
8257 if (tg3_flag(tp, 5750_PLUS)) {
8258 /* Do nothing. */
8259 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8260 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8261 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8262 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8263 else
8264 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8265 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8266 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8267 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8268 int fw_len;
8269
8270 fw_len = tp->fw_len;
8271 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8272 tw32(BUFMGR_MB_POOL_ADDR,
8273 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8274 tw32(BUFMGR_MB_POOL_SIZE,
8275 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8276 }
8277
8278 if (tp->dev->mtu <= ETH_DATA_LEN) {
8279 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8280 tp->bufmgr_config.mbuf_read_dma_low_water);
8281 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8282 tp->bufmgr_config.mbuf_mac_rx_low_water);
8283 tw32(BUFMGR_MB_HIGH_WATER,
8284 tp->bufmgr_config.mbuf_high_water);
8285 } else {
8286 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8287 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8288 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8289 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8290 tw32(BUFMGR_MB_HIGH_WATER,
8291 tp->bufmgr_config.mbuf_high_water_jumbo);
8292 }
8293 tw32(BUFMGR_DMA_LOW_WATER,
8294 tp->bufmgr_config.dma_low_water);
8295 tw32(BUFMGR_DMA_HIGH_WATER,
8296 tp->bufmgr_config.dma_high_water);
8297
8298 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8299 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8300 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8302 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8303 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8304 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8305 tw32(BUFMGR_MODE, val);
8306 for (i = 0; i < 2000; i++) {
8307 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8308 break;
8309 udelay(10);
8310 }
8311 if (i >= 2000) {
8312 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8313 return -ENODEV;
8314 }
8315
8316 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8317 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8318
8319 tg3_setup_rxbd_thresholds(tp);
8320
8321 /* Initialize TG3_BDINFO's at:
8322 * RCVDBDI_STD_BD: standard eth size rx ring
8323 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8324 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8325 *
8326 * like so:
8327 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8328 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8329 * ring attribute flags
8330 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8331 *
8332 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8333 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8334 *
8335 * The size of each ring is fixed in the firmware, but the location is
8336 * configurable.
8337 */
8338 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8339 ((u64) tpr->rx_std_mapping >> 32));
8340 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8341 ((u64) tpr->rx_std_mapping & 0xffffffff));
8342 if (!tg3_flag(tp, 5717_PLUS))
8343 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8344 NIC_SRAM_RX_BUFFER_DESC);
8345
8346 /* Disable the mini ring */
8347 if (!tg3_flag(tp, 5705_PLUS))
8348 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8349 BDINFO_FLAGS_DISABLED);
8350
8351 /* Program the jumbo buffer descriptor ring control
8352 * blocks on those devices that have them.
8353 */
8354 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8355 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8356
8357 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8358 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8359 ((u64) tpr->rx_jmb_mapping >> 32));
8360 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8361 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8362 val = TG3_RX_JMB_RING_SIZE(tp) <<
8363 BDINFO_FLAGS_MAXLEN_SHIFT;
8364 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8365 val | BDINFO_FLAGS_USE_EXT_RECV);
8366 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8367 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8368 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8369 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8370 } else {
8371 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8372 BDINFO_FLAGS_DISABLED);
8373 }
8374
8375 if (tg3_flag(tp, 57765_PLUS)) {
8376 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8377 val = TG3_RX_STD_MAX_SIZE_5700;
8378 else
8379 val = TG3_RX_STD_MAX_SIZE_5717;
8380 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8381 val |= (TG3_RX_STD_DMA_SZ << 2);
8382 } else
8383 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8384 } else
8385 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8386
8387 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8388
8389 tpr->rx_std_prod_idx = tp->rx_pending;
8390 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8391
8392 tpr->rx_jmb_prod_idx =
8393 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8394 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8395
8396 tg3_rings_reset(tp);
8397
8398 /* Initialize MAC address and backoff seed. */
8399 __tg3_set_mac_addr(tp, 0);
8400
8401 /* MTU + ethernet header + FCS + optional VLAN tag */
8402 tw32(MAC_RX_MTU_SIZE,
8403 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8404
8405 /* The slot time is changed by tg3_setup_phy if we
8406 * run at gigabit with half duplex.
8407 */
8408 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8409 (6 << TX_LENGTHS_IPG_SHIFT) |
8410 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8411
8412 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8413 val |= tr32(MAC_TX_LENGTHS) &
8414 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8415 TX_LENGTHS_CNT_DWN_VAL_MSK);
8416
8417 tw32(MAC_TX_LENGTHS, val);
8418
8419 /* Receive rules. */
8420 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8421 tw32(RCVLPC_CONFIG, 0x0181);
8422
8423 /* Calculate RDMAC_MODE setting early, we need it to determine
8424 * the RCVLPC_STATE_ENABLE mask.
8425 */
8426 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8427 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8428 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8429 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8430 RDMAC_MODE_LNGREAD_ENAB);
8431
8432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8433 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8434
8435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8436 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8437 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8438 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8439 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8440 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8441
8442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8443 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8444 if (tg3_flag(tp, TSO_CAPABLE) &&
8445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8446 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8447 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8448 !tg3_flag(tp, IS_5788)) {
8449 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8450 }
8451 }
8452
8453 if (tg3_flag(tp, PCI_EXPRESS))
8454 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8455
8456 if (tg3_flag(tp, HW_TSO_1) ||
8457 tg3_flag(tp, HW_TSO_2) ||
8458 tg3_flag(tp, HW_TSO_3))
8459 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8460
8461 if (tg3_flag(tp, 57765_PLUS) ||
8462 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8463 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8464 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8465
8466 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8467 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8468
8469 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8470 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8472 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8473 tg3_flag(tp, 57765_PLUS)) {
8474 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8475 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8476 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8477 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8478 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8479 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8480 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8481 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8482 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8483 }
8484 tw32(TG3_RDMA_RSRVCTRL_REG,
8485 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8486 }
8487
8488 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8490 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8491 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8492 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8493 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8494 }
8495
8496 /* Receive/send statistics. */
8497 if (tg3_flag(tp, 5750_PLUS)) {
8498 val = tr32(RCVLPC_STATS_ENABLE);
8499 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8500 tw32(RCVLPC_STATS_ENABLE, val);
8501 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8502 tg3_flag(tp, TSO_CAPABLE)) {
8503 val = tr32(RCVLPC_STATS_ENABLE);
8504 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8505 tw32(RCVLPC_STATS_ENABLE, val);
8506 } else {
8507 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8508 }
8509 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8510 tw32(SNDDATAI_STATSENAB, 0xffffff);
8511 tw32(SNDDATAI_STATSCTRL,
8512 (SNDDATAI_SCTRL_ENABLE |
8513 SNDDATAI_SCTRL_FASTUPD));
8514
8515 /* Setup host coalescing engine. */
8516 tw32(HOSTCC_MODE, 0);
8517 for (i = 0; i < 2000; i++) {
8518 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8519 break;
8520 udelay(10);
8521 }
8522
8523 __tg3_set_coalesce(tp, &tp->coal);
8524
8525 if (!tg3_flag(tp, 5705_PLUS)) {
8526 /* Status/statistics block address. See tg3_timer,
8527 * the tg3_periodic_fetch_stats call there, and
8528 * tg3_get_stats to see how this works for 5705/5750 chips.
8529 */
8530 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8531 ((u64) tp->stats_mapping >> 32));
8532 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8533 ((u64) tp->stats_mapping & 0xffffffff));
8534 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8535
8536 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8537
8538 /* Clear statistics and status block memory areas */
8539 for (i = NIC_SRAM_STATS_BLK;
8540 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8541 i += sizeof(u32)) {
8542 tg3_write_mem(tp, i, 0);
8543 udelay(40);
8544 }
8545 }
8546
8547 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8548
8549 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8550 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8551 if (!tg3_flag(tp, 5705_PLUS))
8552 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8553
8554 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8555 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8556 /* reset to prevent losing 1st rx packet intermittently */
8557 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8558 udelay(10);
8559 }
8560
8561 if (tg3_flag(tp, ENABLE_APE))
8562 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8563 else
8564 tp->mac_mode = 0;
8565 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8566 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8567 if (!tg3_flag(tp, 5705_PLUS) &&
8568 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8569 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8570 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8571 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8572 udelay(40);
8573
8574 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8575 * If TG3_FLAG_IS_NIC is zero, we should read the
8576 * register to preserve the GPIO settings for LOMs. The GPIOs,
8577 * whether used as inputs or outputs, are set by boot code after
8578 * reset.
8579 */
8580 if (!tg3_flag(tp, IS_NIC)) {
8581 u32 gpio_mask;
8582
8583 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8584 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8585 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8586
8587 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8588 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8589 GRC_LCLCTRL_GPIO_OUTPUT3;
8590
8591 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8592 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8593
8594 tp->grc_local_ctrl &= ~gpio_mask;
8595 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8596
8597 /* GPIO1 must be driven high for eeprom write protect */
8598 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8599 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8600 GRC_LCLCTRL_GPIO_OUTPUT1);
8601 }
8602 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8603 udelay(100);
8604
8605 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8606 val = tr32(MSGINT_MODE);
8607 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8608 tw32(MSGINT_MODE, val);
8609 }
8610
8611 if (!tg3_flag(tp, 5705_PLUS)) {
8612 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8613 udelay(40);
8614 }
8615
8616 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8617 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8618 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8619 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8620 WDMAC_MODE_LNGREAD_ENAB);
8621
8622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8623 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8624 if (tg3_flag(tp, TSO_CAPABLE) &&
8625 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8626 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8627 /* nothing */
8628 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8629 !tg3_flag(tp, IS_5788)) {
8630 val |= WDMAC_MODE_RX_ACCEL;
8631 }
8632 }
8633
8634 /* Enable host coalescing bug fix */
8635 if (tg3_flag(tp, 5755_PLUS))
8636 val |= WDMAC_MODE_STATUS_TAG_FIX;
8637
8638 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8639 val |= WDMAC_MODE_BURST_ALL_DATA;
8640
8641 tw32_f(WDMAC_MODE, val);
8642 udelay(40);
8643
8644 if (tg3_flag(tp, PCIX_MODE)) {
8645 u16 pcix_cmd;
8646
8647 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8648 &pcix_cmd);
8649 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8650 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8651 pcix_cmd |= PCI_X_CMD_READ_2K;
8652 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8653 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8654 pcix_cmd |= PCI_X_CMD_READ_2K;
8655 }
8656 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8657 pcix_cmd);
8658 }
8659
8660 tw32_f(RDMAC_MODE, rdmac_mode);
8661 udelay(40);
8662
8663 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8664 if (!tg3_flag(tp, 5705_PLUS))
8665 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8666
8667 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8668 tw32(SNDDATAC_MODE,
8669 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8670 else
8671 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8672
8673 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8674 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8675 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8676 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8677 val |= RCVDBDI_MODE_LRG_RING_SZ;
8678 tw32(RCVDBDI_MODE, val);
8679 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8680 if (tg3_flag(tp, HW_TSO_1) ||
8681 tg3_flag(tp, HW_TSO_2) ||
8682 tg3_flag(tp, HW_TSO_3))
8683 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8684 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8685 if (tg3_flag(tp, ENABLE_TSS))
8686 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8687 tw32(SNDBDI_MODE, val);
8688 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8689
8690 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8691 err = tg3_load_5701_a0_firmware_fix(tp);
8692 if (err)
8693 return err;
8694 }
8695
63c3a66f 8696 if (tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
8697 err = tg3_load_tso_firmware(tp);
8698 if (err)
8699 return err;
8700 }
1da177e4
LT
8701
8702 tp->tx_mode = TX_MODE_ENABLE;
f2096f94 8703
63c3a66f 8704 if (tg3_flag(tp, 5755_PLUS) ||
b1d05210
MC
8705 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8706 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
f2096f94
MC
8707
8708 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8709 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8710 tp->tx_mode &= ~val;
8711 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8712 }
8713
1da177e4
LT
8714 tw32_f(MAC_TX_MODE, tp->tx_mode);
8715 udelay(100);
8716
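	/* The indirection table below has TG3_RSS_INDIR_TBL_SIZE
	 * one-byte entries, packed four at a time into 32-bit register
	 * writes.  Each entry maps an RSS hash bucket to an rx ring;
	 * vector 0 only handles link and tx events, so buckets are
	 * spread round-robin over the remaining (irq_cnt - 1) vectors.
	 */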
	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 reg = MAC_RSS_INDIR_TBL_0;
		u8 *ent = (u8 *)&val;

		/* Setup the indirection table */
		for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
			int idx = i % sizeof(val);

			ent[idx] = i % (tp->irq_cnt - 1);
			if (idx == sizeof(val) - 1) {
				tw32(reg, val);
				reg += 4;
			}
		}

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set the drive transmission level to 1.2V, but
			 * only if the signal pre-emphasis bit is not set.
			 */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.speed = tp->link_config.orig_speed;
			tp->link_config.duplex = tp->link_config.orig_duplex;
			tp->link_config.autoneg = tp->link_config.orig_autoneg;
		}

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
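	/* The switch below intentionally falls through, clearing every
	 * rule/value pair from the first unused rule on down.  When ASF
	 * firmware is active, the last four rules are left alone (limit
	 * was reduced by four above), presumably so the firmware's own
	 * receive rules survive this reset.
	 */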
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}

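/* Fold a 32-bit hardware counter into a 64-bit software counter.
 * The low word accumulates modulo 2^32; when the addition wraps
 * (the new low word is smaller than the value just added), carry
 * one into the high word.
 */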
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)

static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}

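/* Workaround for missed MSIs: if a vector reports pending work but
 * its rx/tx consumer indices have not advanced since the previous
 * timer tick, assume the interrupt message was lost and rewrite the
 * interrupt mailbox with the last status tag to re-trigger it.
 */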
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tw32_mailbox(tnapi->int_mbox,
					     tnapi->last_tag << 24);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}

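/* Driver watchdog, re-armed every tp->timer_offset jiffies.  Besides
 * the missed-MSI and non-tagged-status IRQ workarounds, it fetches
 * statistics and polls link state once per second, and sends the ASF
 * heartbeat every two seconds.
 */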
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tg3_flag_set(tp, RESTART_TIMER);
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}

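/* Verify that the device can really deliver an interrupt: install a
 * minimal test handler, force an immediate coalescing event with
 * HOSTCC_MODE_NOW, and poll for up to ~50 ms for evidence that the
 * interrupt arrived before restoring the normal handler.
 */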
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}

/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}

static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}

static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fall back to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

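	/* pci_enable_msix() returns 0 on success, a negative errno on
	 * failure, or a positive count of the vectors that could be
	 * allocated; in the last case, retry with that smaller count.
	 */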
	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}

static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}

static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Unwind only the vectors that were actually
			 * requested, re-pointing tnapi at each of them.
			 */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			break;
		}
	}

	if (err)
		goto err_out2;

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tg3_flag(tp, TAGGED_STATUS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset the loopback feature if it was turned on while the
	 * device was down; make sure that it is installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	return err;
}

static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
						 struct rtnl_link_stats64 *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);

static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	cancel_work_sync(&tp->reset_task);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}

static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}

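/* On 5700/5701 copper devices the FCS error count comes from a PHY
 * test register (counting is enabled via MII_TG3_TEST1_CRC_EN) and is
 * accumulated in software; all other devices report it through the
 * hardware statistics block.
 */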
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}

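/* The hardware statistics counters do not survive a chip reset, so
 * tg3_close() snapshots the running totals into tp->estats_prev and
 * tp->net_stats_prev; the getters below return snapshot plus the
 * current hardware counters.
 */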
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)

static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);

	return estats;
}

static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;

	return stats;
}

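/* Bit-by-bit CRC-32 using the reflected polynomial 0xedb88320, the
 * same CRC the Ethernet FCS uses.  __tg3_set_rx_mode() below takes
 * seven bits of the inverted CRC of each multicast address to select
 * one of the 128 hash filter bits in MAC_HASH_REG_0..3.
 */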
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}

static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}

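/* NVRAM is read in aligned 32-bit words, so a misaligned or
 * odd-length EEPROM request is served by reading the enclosing words
 * and copying out only the bytes the caller asked for.
 */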
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}

static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);

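/* Writes have the same word-alignment constraint: a misaligned or
 * odd-length request is widened to whole words by first reading the
 * bordering words, splicing the caller's bytes into a bounce buffer,
 * and then programming the whole block.
 */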
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}

static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}

static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}

static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}

static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}

static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}

static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	ering->rx_mini_max_pending = 0;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	ering->rx_mini_pending = 0;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}

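/* Sanity limits for the ring sizes: tx_pending must exceed
 * MAX_SKB_FRAGS so a maximally fragmented skb can always be posted,
 * and chips with the TSO_BUG workaround require more than three
 * times that headroom, presumably because such frames may have to be
 * segmented by the driver into multiple descriptors.
 */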
1da177e4
LT
10319static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10320{
10321 struct tg3 *tp = netdev_priv(dev);
646c9edd 10322 int i, irq_sync = 0, err = 0;
6aa20a22 10323
2c49a44d
MC
10324 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10325 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
bc3a9254
MC
10326 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10327 (ering->tx_pending <= MAX_SKB_FRAGS) ||
63c3a66f 10328 (tg3_flag(tp, TSO_BUG) &&
bc3a9254 10329 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
1da177e4 10330 return -EINVAL;
6aa20a22 10331
bbe832c0 10332 if (netif_running(dev)) {
b02fd9e3 10333 tg3_phy_stop(tp);
1da177e4 10334 tg3_netif_stop(tp);
bbe832c0
MC
10335 irq_sync = 1;
10336 }
1da177e4 10337
bbe832c0 10338 tg3_full_lock(tp, irq_sync);
6aa20a22 10339
1da177e4
LT
10340 tp->rx_pending = ering->rx_pending;
10341
63c3a66f 10342 if (tg3_flag(tp, MAX_RXPEND_64) &&
1da177e4
LT
10343 tp->rx_pending > 63)
10344 tp->rx_pending = 63;
10345 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
646c9edd 10346
6fd45cb8 10347 for (i = 0; i < tp->irq_max; i++)
646c9edd 10348 tp->napi[i].tx_pending = ering->tx_pending;
1da177e4
LT
10349
10350 if (netif_running(dev)) {
944d980e 10351 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
b9ec6c1b
MC
10352 err = tg3_restart_hw(tp, 1);
10353 if (!err)
10354 tg3_netif_start(tp);
1da177e4
LT
10355 }
10356
f47c11ee 10357 tg3_full_unlock(tp);
6aa20a22 10358
b02fd9e3
MC
10359 if (irq_sync && !err)
10360 tg3_phy_start(tp);
10361
b9ec6c1b 10362 return err;
1da177e4 10363}
6aa20a22 10364
1da177e4
LT
10365static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10366{
10367 struct tg3 *tp = netdev_priv(dev);
6aa20a22 10368
63c3a66f 10369 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
8d018621 10370
e18ce346 10371 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
8d018621
MC
10372 epause->rx_pause = 1;
10373 else
10374 epause->rx_pause = 0;
10375
e18ce346 10376 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
8d018621
MC
10377 epause->tx_pause = 1;
10378 else
10379 epause->tx_pause = 0;
1da177e4 10380}

static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.orig_advertising &=
				~(ADVERTISED_Pause |
				  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
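
/* Flow-control advertisement mapping used above (the standard 802.3
 * symmetric/asymmetric pause convention):
 *
 *	rx_pause  tx_pause	advertised bits
 *	    1	      1		Pause
 *	    1	      0		Pause | Asym_Pause
 *	    0	      1		Asym_Pause
 *	    0	      0		none
 */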

static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}

static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
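
/* Per the in-code comment above, returning 1 for ETHTOOL_ID_ACTIVE asks
 * the ethtool core to drive one on/off cycle per second through the
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF callbacks until the user interrupts
 * "ethtool -p".
 */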

static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}

static __be32 *tg3_vpd_readblock(struct tg3 *tp)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	return buf;

error:
	kfree(buf);
	return NULL;
}
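
/* tg3_vpd_readblock() hands back a kmalloc()ed copy of the VPD block
 * that the caller must kfree(): read from NVRAM when an extended-VPD
 * directory entry (or the fixed TG3_NVM_VPD_OFF window) applies,
 * otherwise fetched through PCI config space with up to three
 * pci_read_vpd() attempts.
 */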

#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x4c
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c

static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
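
/* NVRAM image layout validated above for TG3_EEPROM_MAGIC parts: a CRC
 * of the first 0x10 bytes is stored at offset 0x10, and a CRC of the
 * 0x88-byte manufacturing block starting at 0x74 is stored at offset
 * 0xfc.  Selfboot images instead carry a simple byte checksum
 * (format 1) or per-byte parity bits (the "HW" format), both checked
 * earlier in the function.
 */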

#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
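
/* The link test is a simple poll: netif_carrier_ok() is sampled once a
 * second, for up to 2 s on SerDes ports and 6 s on copper ports.
 */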

/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
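
/* Each reg_tbl[] entry drives its register through two probes: writing
 * 0 must leave the read-only bits (read_mask) unchanged and every
 * writable bit (write_mask) clear, and writing read_mask | write_mask
 * must leave the read-only bits unchanged and every writable bit set.
 * E.g. for MAC_ADDR_0_HIGH (read_mask 0, write_mask 0x0000ffff) the
 * expected read-backs are 0x0 and then 0xffff.  The saved value is
 * restored after each probe.
 */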

static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}

static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}

#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1
#define TG3_TSO_LOOPBACK	2

#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
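
/* Decoded, the canned header above is: Ethertype 0x0800 followed by an
 * IPv4 header (IHL 5, DF set, TTL 64, protocol TCP, 10.0.0.1 ->
 * 10.0.0.2, tot_len patched in later) and a TCP header with data
 * offset 8 (20 bytes plus 12 bytes of NOP/NOP/timestamp options) and
 * the ACK flag set.  tg3_run_loopback() copies it in right after the
 * two MAC addresses for the TSO loopback variant.
 */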

static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.  Also, the MAC loopback test is deprecated for
		 * all newer ASIC revisions.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
		    tg3_flag(tp, CPMU_PRESENT))
			return 0;

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		if (!tg3_flag(tp, 5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else {
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_phy_fet_toggle_apd(tp, false);
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_writephy(tp, MII_TG3_FET_PTEST,
				     MII_TG3_FET_PTEST_FRC_TX_LINK |
				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
			/* The write needs to be flushed for the AC131 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
			if (masked_phy_id == TG3_PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}
	}

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (loopback_mode == TG3_TSO_LOOPBACK) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
		    base_flags, (mss << 1) | 1);

	tnapi->tx_prod++;

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (loopback_mode != TG3_TSO_LOOPBACK) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_skb->data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
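
/* In outline, tg3_run_loopback(): selects MAC-internal or PHY (BMCR)
 * loopback, builds a single frame (optionally a TSO pseudo-frame based
 * on tg3_tso_header), posts it to the tx ring, polls the status block
 * for up to ~350 us until the tx consumer and rx producer indices
 * advance, and then walks the rx return ring checking ring placement
 * (std vs jumbo), length and every payload byte.
 */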

#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4

#define TG3_MAC_LOOPBACK_SHIFT		0
#define TG3_PHY_LOOPBACK_SHIFT		4
#define TG3_LOOPBACK_FAILED		0x00000077

static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 eee_cap, cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	err = tg3_reset_hw(tp, 1);
	if (err) {
		err = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* Turn off gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, false);

	if (tg3_flag(tp, CPMU_PRESENT)) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER) {
			err = TG3_LOOPBACK_FAILED;
			goto done;
		}

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
		err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
	    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
		err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, CPMU_PRESENT)) {
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
			err |= TG3_STD_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
			err |= TG3_TSO_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
			err |= TG3_JMB_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
	}

	/* Re-enable gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);

done:
	tp->phy_flags |= eee_cap;

	return err;
}
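
/* Result encoding: each loopback mode reports a 3-bit failure mask
 * (bit 0 standard frame, bit 1 jumbo, bit 2 TSO), shifted by
 * TG3_MAC_LOOPBACK_SHIFT (0) or TG3_PHY_LOOPBACK_SHIFT (4); 0x77 thus
 * means every sub-test failed.
 */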

static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
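
/* ethtool self-test result slots filled in above: data[0] NVRAM,
 * data[1] link, data[2] registers, data[3] memory, data[4] the raw
 * failure mask from tg3_test_loopback(), data[5] interrupt.  Slots 2-5
 * are only exercised when an offline test is requested.
 */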

static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}

static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
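
/* Example mapping from userspace (interface name hypothetical):
 * "ethtool -C eth0 rx-usecs 20 rx-frames 5" arrives here as
 * ec->rx_coalesce_usecs = 20 and ec->rx_max_coalesced_frames = 5;
 * setting both of a direction's parameters to zero is rejected above
 * because no interrupt would ever fire for that direction.
 */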

static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
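
/* The ops table above backs the standard ethtool commands; for example
 * (interface name hypothetical), "ethtool -t eth0 offline" reaches
 * tg3_self_test(), "ethtool -p eth0" reaches tg3_set_phys_id(), and
 * "ethtool -A eth0 rx on" reaches tg3_set_pauseparam().
 */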

static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
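
/* Sizing by wraparound, sketched for a hypothetical 64 KiB part: reads
 * at offsets 0x10, 0x20, 0x40, ... return ordinary data until the
 * offset reaches 0x10000, which aliases offset 0 and returns the magic
 * signature again, so cursize (64 KiB) is taken as the device size.
 */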

static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
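
/* Worked example of the swab above: if the halfword read back is
 * 0x0002, swab16() yields 0x0200 (512), so nvram_size becomes
 * 512 * 1024 bytes.
 */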

static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}

static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}

static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}

static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}

static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}

static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}

static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12204
12205
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

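/* tg3_nvram_init() below resets the EEPROM state machine, takes the NVRAM
 * lock, and dispatches to the per-ASIC helper above based on
 * GET_ASIC_REV(tp->pci_chip_rev_id).  On 5700/5701, which lack the NVRAM
 * interface, it falls back to sizing the legacy seeprom instead.
 */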
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

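/* Per-word SEEPROM write handshake used below (as implemented here, not
 * quoted from a datasheet): load GRC_EEPROM_DATA, latch the address with
 * EEPROM_ADDR_START | EEPROM_ADDR_WRITE, then poll GRC_EEPROM_ADDR for
 * EEPROM_ADDR_COMPLETE for up to ~1 second (1000 x 1 ms) before giving up
 * with -EBUSY.
 */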
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}

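/* Unbuffered flash parts below are written with a read-modify-write of a
 * whole page: read the page into a scratch buffer, merge in the new
 * bytes, issue WREN plus a page erase, then rewrite the page word by
 * word, with NVRAM_CMD_FIRST/NVRAM_CMD_LAST marking the page boundaries.
 */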
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
					    u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

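/* Buffered parts can be written a word at a time.  NVRAM_CMD_FIRST is
 * sent at a page boundary (or for the first word of the transfer) and
 * NVRAM_CMD_LAST at the end of a page or of the whole transfer; some
 * older ST parts additionally require an explicit WREN before each
 * FIRST command, as handled below.
 */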
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
					  u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}

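/* Top-level NVRAM write entry point: temporarily drops the GPIO-based
 * write protection when EEPROM_WRITE_PROT is set, then routes the
 * request to the eeprom, buffered, or unbuffered writer depending on
 * the NVRAM, NVRAM_BUFFERED, and FLASH flags probed earlier.
 */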
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
							     buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
							       buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

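/* Static PCI subsystem-ID -> PHY ID map, used as a last resort when
 * neither the chip nor the eeprom yields a usable PHY ID (see
 * tg3_phy_probe()).  A phy_id of 0 in an entry is treated by the caller
 * as a serdes device.
 */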
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

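/* Pull the bootcode-provided hardware configuration out of NIC SRAM.
 * The layout is only trusted when NIC_SRAM_DATA_SIG contains the magic
 * signature; otherwise the conservative defaults set at the top of the
 * function (write-protected, WOL-capable, LED mode PHY_1) are kept.
 */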
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by
		 * bootcode if bit 18 is set
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

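/* Kick a single OTP controller command and busy-wait (in 10 us steps,
 * 1 ms total) for OTP_STATUS_CMD_DONE.  Returns 0 on completion or
 * -EBUSY on timeout.
 */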
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

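/* Reset link_config to "everything autoneg" defaults.  The advertised
 * mask is trimmed for 10/100-only PHYs and for serdes parts, and the
 * SPEED_INVALID/DUPLEX_INVALID markers indicate that no link parameters
 * have been negotiated or forced yet.
 */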
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg |
		  ADVERTISED_Pause;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}

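/* The two MII ID words are packed into a single 32-bit phy_id before
 * being compared against the TG3_PHY_ID_* constants:
 *
 *	phy_id  = (PHYSID1 & 0xffff) << 10;
 *	phy_id |= (PHYSID2 & 0xfc00) << 16;
 *	phy_id |= (PHYSID2 & 0x03ff);
 *
 * (A restatement of the code below and in tg3_get_eeprom_hw_cfg().)
 * The masked compare through TG3_PHY_ID_MASK makes the lookup ignore
 * the low-order revision bits.
 */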
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the PHY ID found in the eeprom area and, failing
		 * that, the hard-coded subsystem-ID table.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}

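/* Parse the PCI VPD image: locate the read-only LRDT section, then, for
 * boards whose MFR_ID keyword is "1028" (Dell's PCI vendor ID), copy the
 * VENDOR0 keyword into tp->fw_ver, and finally copy the PARTNO keyword
 * into tp->board_part_number.  If no usable VPD exists, fall back to a
 * part number derived from the PCI device ID.
 */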
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > TG3_NVM_VPD_LEN)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > TG3_NVM_VPD_LEN)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

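/* Bootcode version strings come in two formats.  Newer images (word 0
 * matching the 0x0c000000 pattern checked by tg3_fw_img_is_valid(), with
 * word 1 zero) carry a 16-byte ASCII string that is copied verbatim;
 * older images only provide packed major/minor fields, formatted here as
 * "vM.mm".
 */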
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

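/* Management firmware version: walk the NVRAM directory for the ASF
 * init entry, validate the image it points at, then append up to 16
 * bytes of its version string to tp->fw_ver, prefixed with ", ".
 */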
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

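/* Assemble the composite tp->fw_ver string.  Any VPD-provided version is
 * kept first, then the bootcode/selfboot/hw-selfboot version is decoded
 * from NVRAM, and finally DASH/NCSI (APE) or ASF management firmware
 * versions are appended when no VPD version was present.
 */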
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (vpd_vers)
		goto done;

	if (tg3_flag(tp, ENABLE_APE)) {
		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	} else if (tg3_flag(tp, ENABLE_ASF)) {
		tg3_read_mgmtfw_ver(tp);
	}

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

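/* tg3_get_invariants() runs once at probe time and converts raw PCI
 * config space and chip revision information into the tg3_flag()
 * feature and erratum bits that the rest of the driver keys off of.
 */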
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time and so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

		tp->pci_chip_rev_id = prod_id_asic_rev;
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32 vendor;
			u32 device;
			u32 rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32 vendor;
			u32 device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tg3_flag_set(tp, 5780_CLASS);
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

f6eb9b1f 13759 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3a1e19d3 13760 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
7544b097
MC
13761 tp->pdev_peer = tg3_find_peer(tp);
13762
c885e824 13763 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
d78b59f5
MC
13764 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13765 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
63c3a66f 13766 tg3_flag_set(tp, 5717_PLUS);
0a58d668
MC
13767
13768 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
63c3a66f
JP
13769 tg3_flag(tp, 5717_PLUS))
13770 tg3_flag_set(tp, 57765_PLUS);
c885e824 13771
321d32a0
MC
13772 /* Intentionally exclude ASIC_REV_5906 */
13773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d9ab5ad1 13774 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 13775 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 13776 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
57e6983c 13777 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
f6eb9b1f 13778 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
63c3a66f
JP
13779 tg3_flag(tp, 57765_PLUS))
13780 tg3_flag_set(tp, 5755_PLUS);
321d32a0
MC
13781
13782 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
b5d3772c 13784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
63c3a66f
JP
13785 tg3_flag(tp, 5755_PLUS) ||
13786 tg3_flag(tp, 5780_CLASS))
13787 tg3_flag_set(tp, 5750_PLUS);
6708e5cc 13788
6ff6f81d 13789 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
63c3a66f
JP
13790 tg3_flag(tp, 5750_PLUS))
13791 tg3_flag_set(tp, 5705_PLUS);
1b440c56 13792
	/* Determine TSO capabilities */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}
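
	/* Summary of the tiers above: HW_TSO_3, HW_TSO_2 and HW_TSO_1 mark
	 * successively older generations of hardware TSO engines.  Chips
	 * without hardware TSO fall back to a downloadable TSO firmware
	 * image (FIRMWARE_TG3TSO or FIRMWARE_TG3TSO5), and the oldest
	 * parts get no TSO at all.
	 */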

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
		tg3_flag_set(tp, TSO_CAPABLE);
	else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}
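
	/* Interrupt mode policy, in decreasing order of preference:
	 * MSI-X (57765_PLUS parts, up to TG3_IRQ_MAX_VECS vectors for
	 * RSS), then single-vector MSI, then legacy INTx.  MSI is vetoed
	 * outright on the revisions above with known MSI errata.
	 */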

	if (tg3_flag(tp, 5755_PLUS))
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		tp->pcie_readrq = 4096;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tp->pcie_readrq = 2048;

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}
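
	/* From here on the bus attachment is classified one of three
	 * ways: native PCI Express (PCI_EXPRESS), PCI-X (PCIX_MODE), or
	 * conventional PCI.  Several register-access workarounds below
	 * key off this classification.
	 */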

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}
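
	/* The accessors above form a small dispatch table: all register
	 * and mailbox I/O in the driver goes through tp->read32,
	 * tp->write32 and friends, so bus- and chip-specific workarounds
	 * (indirect config-space access, post-write flushes) are selected
	 * once here instead of being scattered through the hot paths.
	 */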

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);
16821285
MC
14050 /* The memory arbiter has to be enabled in order for SRAM accesses
14051 * to succeed. Normally on powerup the tg3 chip firmware will make
14052 * sure it is enabled, but other entities such as system netboot
14053 * code might disable it.
14054 */
14055 val = tr32(MEMARB_MODE);
14056 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14057
69f11c99
MC
14058 if (tg3_flag(tp, PCIX_MODE)) {
14059 pci_read_config_dword(tp->pdev,
14060 tp->pcix_cap + PCI_X_STATUS, &val);
14061 tp->pci_fn = val & 0x7;
14062 } else {
14063 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14064 }
14065
7d0c41ef 14066 /* Get eeprom hw config before calling tg3_set_power_state().
63c3a66f 14067 * In particular, the TG3_FLAG_IS_NIC flag must be
7d0c41ef
MC
14068 * determined before calling tg3_set_power_state() so that
14069 * we know whether or not to switch out of Vaux power.
14070 * When the flag is set, it means that GPIO1 is used for eeprom
14071 * write protect and also implies that it is a LOM where GPIOs
14072 * are not used to switch power.
6aa20a22 14073 */
7d0c41ef
MC
14074 tg3_get_eeprom_hw_cfg(tp);
14075
	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, CPMU_PRESENT);

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}
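
	/* The write/write/read pattern above appears designed to catch
	 * the case where back-to-back posted MMIO writes corrupt the
	 * neighboring word: if sram_base reads back as anything but the
	 * zero originally written, direct register writes cannot be
	 * trusted and the indirect-access workaround is forced on.
	 */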

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = TG3_DEF_MAC_MODE;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe; here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}

#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif

static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}

#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

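	/* The PCI cache line size register counts in 32-bit words, hence
	 * the multiply by 4 above; a value of zero (left unprogrammed by
	 * firmware) is treated as the 1024-byte worst case.
	 */
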
	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}

static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

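/* tg3_test_dma() below works by DMA-ing a counting pattern to the chip
 * and back with the write-boundary bits wide open.  If the pattern
 * returns corrupted, the write boundary is clamped to 16 bytes and the
 * test is retried.  Chipsets in the table above are known to expose the
 * bug without failing the test, so they get the clamp unconditionally.
 */
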
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
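
/* These defaults seed the chip's buffer manager.  Roughly speaking, the
 * low-water marks set the mbuf-pool occupancy at which the hardware
 * begins flow-controlling the link, and the high-water marks cap total
 * mbuf usage; jumbo-frame thresholds are tracked separately because
 * jumbo traffic consumes on-chip buffer memory much faster.
 */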

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:				return "serdes";
	default:			return "unknown";
	}
}

static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}

static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
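
/* tg3_init_one() below is the PCI probe routine: it maps the device,
 * gathers the chip invariants, sizes the DMA masks, runs the DMA test,
 * and registers the net_device whose ops table is defined above.
 */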
15106
1da177e4
LT
15107static int __devinit tg3_init_one(struct pci_dev *pdev,
15108 const struct pci_device_id *ent)
15109{
1da177e4
LT
15110 struct net_device *dev;
15111 struct tg3 *tp;
646c9edd
MC
15112 int i, err, pm_cap;
15113 u32 sndmbx, rcvmbx, intmbx;
f9804ddb 15114 char str[40];
72f2afb8 15115 u64 dma_mask, persist_dma_mask;
0da0606f 15116 u32 features = 0;
1da177e4 15117
05dbe005 15118 printk_once(KERN_INFO "%s\n", version);
1da177e4
LT
15119
15120 err = pci_enable_device(pdev);
15121 if (err) {
2445e461 15122 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
1da177e4
LT
15123 return err;
15124 }
15125
1da177e4
LT
15126 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15127 if (err) {
2445e461 15128 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
1da177e4
LT
15129 goto err_out_disable_pdev;
15130 }
15131
15132 pci_set_master(pdev);
15133
15134 /* Find power-management capability. */
15135 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15136 if (pm_cap == 0) {
2445e461
MC
15137 dev_err(&pdev->dev,
15138 "Cannot find Power Management capability, aborting\n");
1da177e4
LT
15139 err = -EIO;
15140 goto err_out_free_res;
15141 }
15142
16821285
MC
15143 err = pci_set_power_state(pdev, PCI_D0);
15144 if (err) {
15145 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15146 goto err_out_free_res;
15147 }
15148
fe5f5787 15149 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
1da177e4 15150 if (!dev) {
2445e461 15151 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
1da177e4 15152 err = -ENOMEM;
16821285 15153 goto err_out_power_down;
1da177e4
LT
15154 }
15155
1da177e4
LT
15156 SET_NETDEV_DEV(dev, &pdev->dev);
15157
1da177e4
LT
15158 tp = netdev_priv(dev);
15159 tp->pdev = pdev;
15160 tp->dev = dev;
15161 tp->pm_cap = pm_cap;
1da177e4
LT
15162 tp->rx_mode = TG3_DEF_RX_MODE;
15163 tp->tx_mode = TG3_DEF_TX_MODE;
8ef21428 15164
1da177e4
LT
15165 if (tg3_debug > 0)
15166 tp->msg_enable = tg3_debug;
15167 else
15168 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15169
15170 /* The word/byte swap controls here control register access byte
15171 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15172 * setting below.
15173 */
15174 tp->misc_host_ctrl =
15175 MISC_HOST_CTRL_MASK_PCI_INT |
15176 MISC_HOST_CTRL_WORD_SWAP |
15177 MISC_HOST_CTRL_INDIR_ACCESS |
15178 MISC_HOST_CTRL_PCISTATE_RW;
15179
15180 /* The NONFRM (non-frame) byte/word swap controls take effect
15181 * on descriptor entries, anything which isn't packet data.
15182 *
15183 * The StrongARM chips on the board (one for tx, one for rx)
15184 * are running in big-endian mode.
15185 */
15186 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15187 GRC_MODE_WSWAP_NONFRM_DATA);
15188#ifdef __BIG_ENDIAN
15189 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15190#endif
15191 spin_lock_init(&tp->lock);
1da177e4 15192 spin_lock_init(&tp->indirect_lock);
c4028958 15193 INIT_WORK(&tp->reset_task, tg3_reset_task);
1da177e4 15194
d5fe488a 15195 tp->regs = pci_ioremap_bar(pdev, BAR_0);
ab0049b4 15196 if (!tp->regs) {
ab96b241 15197 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
1da177e4
LT
15198 err = -ENOMEM;
15199 goto err_out_free_dev;
15200 }
15201
c9cab24e
MC
15202 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15203 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15204 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15205 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15206 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15207 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15208 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15209 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15210 tg3_flag_set(tp, ENABLE_APE);
15211 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15212 if (!tp->aperegs) {
15213 dev_err(&pdev->dev,
15214 "Cannot map APE registers, aborting\n");
15215 err = -ENOMEM;
15216 goto err_out_iounmap;
15217 }
15218 }
15219
1da177e4
LT
15220 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15221 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
1da177e4 15222
1da177e4 15223 dev->ethtool_ops = &tg3_ethtool_ops;
1da177e4 15224 dev->watchdog_timeo = TG3_TX_TIMEOUT;
2ffcc981 15225 dev->netdev_ops = &tg3_netdev_ops;
1da177e4 15226 dev->irq = pdev->irq;
1da177e4
LT
15227
15228 err = tg3_get_invariants(tp);
15229 if (err) {
ab96b241
MC
15230 dev_err(&pdev->dev,
15231 "Problem fetching invariants of chip, aborting\n");
c9cab24e 15232 goto err_out_apeunmap;
1da177e4
LT
15233 }
15234
4a29cc2e
MC
15235 /* The EPB bridge inside 5714, 5715, and 5780 and any
15236 * device behind the EPB cannot support DMA addresses > 40-bit.
72f2afb8
MC
15237 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15238 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15239 * do DMA address check in tg3_start_xmit().
15240 */
63c3a66f 15241 if (tg3_flag(tp, IS_5788))
284901a9 15242 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
63c3a66f 15243 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
50cf156a 15244 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
72f2afb8 15245#ifdef CONFIG_HIGHMEM
6a35528a 15246 dma_mask = DMA_BIT_MASK(64);
72f2afb8 15247#endif
4a29cc2e 15248 } else
6a35528a 15249 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
72f2afb8
MC
15250
15251 /* Configure DMA attributes. */
284901a9 15252 if (dma_mask > DMA_BIT_MASK(32)) {
72f2afb8
MC
15253 err = pci_set_dma_mask(pdev, dma_mask);
15254 if (!err) {
0da0606f 15255 features |= NETIF_F_HIGHDMA;
72f2afb8
MC
15256 err = pci_set_consistent_dma_mask(pdev,
15257 persist_dma_mask);
15258 if (err < 0) {
ab96b241
MC
15259 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15260 "DMA for consistent allocations\n");
c9cab24e 15261 goto err_out_apeunmap;
72f2afb8
MC
15262 }
15263 }
15264 }
284901a9
YH
15265 if (err || dma_mask == DMA_BIT_MASK(32)) {
15266 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
72f2afb8 15267 if (err) {
ab96b241
MC
15268 dev_err(&pdev->dev,
15269 "No usable DMA configuration, aborting\n");
c9cab24e 15270 goto err_out_apeunmap;
72f2afb8
MC
15271 }
15272 }
15273
fdfec172 15274 tg3_init_bufmgr_config(tp);
1da177e4 15275
0da0606f
MC
15276 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15277
15278 /* 5700 B0 chips do not support checksumming correctly due
15279 * to hardware bugs.
15280 */
15281 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15282 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15283
15284 if (tg3_flag(tp, 5755_PLUS))
15285 features |= NETIF_F_IPV6_CSUM;
15286 }
15287
4e3a7aaa
MC
15288 /* TSO is on by default on chips that support hardware TSO.
15289 * Firmware TSO on older chips gives lower performance, so it
15290 * is off by default, but can be enabled using ethtool.
15291 */
63c3a66f
JP
15292 if ((tg3_flag(tp, HW_TSO_1) ||
15293 tg3_flag(tp, HW_TSO_2) ||
15294 tg3_flag(tp, HW_TSO_3)) &&
0da0606f
MC
15295 (features & NETIF_F_IP_CSUM))
15296 features |= NETIF_F_TSO;
63c3a66f 15297 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
0da0606f
MC
15298 if (features & NETIF_F_IPV6_CSUM)
15299 features |= NETIF_F_TSO6;
63c3a66f 15300 if (tg3_flag(tp, HW_TSO_3) ||
e849cdc3 15301 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
57e6983c
MC
15302 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15303 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
63c3a66f 15304 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
dc668910 15305 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
0da0606f 15306 features |= NETIF_F_TSO_ECN;
b0026624 15307 }

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}
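
	/* Evidently a slow-bus constraint: 5705 A1 parts without TSO on a
	 * non-high-speed PCI bus are capped at 64 pending RX descriptors
	 * (rx_pending of 63) via the MAX_RXPEND_64 flag set above.
	 */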

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut down.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS. If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts. Reuse the
		 * mailbox values for the next iteration. The values we set up
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
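
	/* The -0x4/+0xc send-mailbox stride above walks both 32-bit halves
	 * of each 64-bit producer-index mailbox (offsets +0x4, +0x0, +0xc,
	 * +0x8, ... from MAILBOX_SNDHOST_PROD_IDX_0, assuming
	 * TG3_64BIT_REG_LOW is 0x4), giving each rx/tx vector its own
	 * 4-byte mailbox; this follows from the address arithmetic here.
	 */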

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);
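
	/* Everything the device needs at open() time must be ready above
	 * this point: once register_netdev() succeeds, the interface is
	 * visible to userspace and ndo_open() can be invoked at any time.
	 */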
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}
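
	/* Of the two branches above, the first reports a phylib-managed
	 * PHY (TG3_PHYFLG_IS_CONNECTED is set when the PHY is attached
	 * through the mdio bus); the second covers PHYs that tg3 drives
	 * directly.
	 */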

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;
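
	/* Error unwind: each label below undoes progressively less of the
	 * probe, in reverse order of acquisition.
	 */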

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		cancel_work_sync(&tp->reset_task);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
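
/* System sleep callbacks. tg3_suspend() quiesces the interface and
 * prepares the chip for power-down; tg3_resume() restarts the hardware
 * and brings the interface back up.
 */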
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
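
/* If tg3_power_down_prepare() fails, the block above attempts to
 * restart the hardware and reattach the interface so the device is
 * left usable; the original error is still returned to the PM core.
 */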

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
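
/* SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops that routes
 * suspend/freeze/poweroff to tg3_suspend() and resume/thaw/restore to
 * tg3_resume(), so hibernation reuses the plain sleep handlers.
 */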
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
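
/* Returning PCI_ERS_RESULT_NEED_RESET above asks the error-recovery
 * core to reset the slot and then call tg3_io_slot_reset(). A
 * permanently failed channel is reported as DISCONNECT instead, and
 * pci_disable_device() is skipped, presumably because the device is
 * already unreachable.
 */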

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
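	/* pci_restore_state() above reloads the config space snapshot taken
	 * by pci_save_state() at probe time; saving again immediately
	 * refreshes that snapshot for any later recovery cycle.
	 */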

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
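
/* The PM ops land under .driver.pm because struct pci_driver embeds a
 * struct device_driver; pci_register_driver() below hooks everything
 * (probe, remove, error handlers, PM callbacks) into the PCI core.
 */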

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);